"""simple docstring"""
from math import sqrt
def a_ ( _lowerCAmelCase : int = 100_0000 ):
'''simple docstring'''
lowercase__ : int = 0
lowercase__ : int = 0
lowercase__ : int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(_lowerCAmelCase , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(f'''{solution() = }''')
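
# Worked check (added; a small sketch, not part of the original solution): the
# problem statement's example cuboid is 6 x 5 x 3, whose shortest surface path
# is sqrt((5 + 3)**2 + 6**2) = sqrt(100) = 10, an integer, which is exactly the
# condition tested above with max_cuboid_size = 6 and sum_shortest_sides = 8.
if __name__ == "__main__":
    assert sqrt((5 + 3) ** 2 + 6**2) == 10.0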
"""simple docstring"""
import argparse
import json
from tqdm import tqdm
def a_ ( ):
'''simple docstring'''
lowercase__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--src_path' , type=_lowerCAmelCase , default='biencoder-nq-dev.json' , help='Path to raw DPR training data' , )
parser.add_argument(
'--evaluation_set' , type=_lowerCAmelCase , help='where to store parsed evaluation_set file' , )
parser.add_argument(
'--gold_data_path' , type=_lowerCAmelCase , help='where to store parsed gold_data_path file' , )
lowercase__ : Union[str, Any] = parser.parse_args()
with open(args.src_path , 'r' ) as src_file, open(args.evaluation_set , 'w' ) as eval_file, open(
args.gold_data_path , 'w' ) as gold_file:
lowercase__ : List[str] = json.load(_lowerCAmelCase )
for dpr_record in tqdm(_lowerCAmelCase ):
lowercase__ : Any = dpr_record['question']
lowercase__ : Optional[Any] = [context['title'] for context in dpr_record['positive_ctxs']]
eval_file.write(question + '\n' )
gold_file.write('\t'.join(_lowerCAmelCase ) + '\n' )
if __name__ == "__main__":
main()
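
# For reference (an assumption inferred from the parsing code above, not from
# the DPR documentation): each record in the source JSON is expected to look
# roughly like
#   {"question": "...", "positive_ctxs": [{"title": "...", "text": "..."}, ...]}
# so the script writes one question per line to the evaluation set and one
# tab-joined list of gold context titles per line to the gold data file.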
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class __lowercase( unittest.TestCase ):
'''simple docstring'''
def __init__( self , __a , __a=100 , __a=13 , __a=30 , __a=2 , __a=3 , __a=True , __a=True , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=10 , __a=0.02 , __a=3 , ):
__lowerCamelCase : str = parent
__lowerCamelCase : Union[str, Any] = vocab_size
__lowerCamelCase : List[str] = batch_size
__lowerCamelCase : Tuple = image_size
__lowerCamelCase : int = patch_size
__lowerCamelCase : List[str] = num_channels
__lowerCamelCase : Union[str, Any] = is_training
__lowerCamelCase : Dict = use_labels
__lowerCamelCase : Dict = hidden_size
__lowerCamelCase : List[str] = num_hidden_layers
__lowerCamelCase : int = num_attention_heads
__lowerCamelCase : str = intermediate_size
__lowerCamelCase : Any = hidden_act
__lowerCamelCase : List[str] = hidden_dropout_prob
__lowerCamelCase : Any = attention_probs_dropout_prob
__lowerCamelCase : int = type_sequence_label_size
__lowerCamelCase : int = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowerCamelCase : List[str] = (image_size // patch_size) ** 2
__lowerCamelCase : Union[str, Any] = num_patches + 1
def snake_case_ ( self ):
__lowerCamelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCamelCase : Tuple = None
if self.use_labels:
__lowerCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase : str = BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , )
return config, pixel_values, labels
def snake_case_ ( self , __a , __a , __a ):
__lowerCamelCase : Any = FlaxBeitModel(config=__a )
__lowerCamelCase : Union[str, Any] = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case_ ( self , __a , __a , __a ):
__lowerCamelCase : Union[str, Any] = FlaxBeitForMaskedImageModeling(config=__a )
__lowerCamelCase : str = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def snake_case_ ( self , __a , __a , __a ):
__lowerCamelCase : str = self.type_sequence_label_size
__lowerCamelCase : int = FlaxBeitForImageClassification(config=__a )
__lowerCamelCase : int = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowerCamelCase : Dict = 1
__lowerCamelCase : List[Any] = FlaxBeitForImageClassification(__a )
__lowerCamelCase : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCamelCase : List[str] = model(__a )
def snake_case_ ( self ):
__lowerCamelCase : Any = self.prepare_config_and_inputs()
(
(
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) ,
) : Optional[int] = config_and_inputs
__lowerCamelCase : Union[str, Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class __lowercase( lowercase__ , unittest.TestCase ):
'''simple docstring'''
__a : str = (
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
def snake_case_ ( self ):
__lowerCamelCase : str = FlaxBeitModelTester(self )
__lowerCamelCase : str = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def snake_case_ ( self ):
self.config_tester.run_common_tests()
def snake_case_ ( self ):
__lowerCamelCase , __lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : Optional[int] = model_class(__a )
__lowerCamelCase : List[Any] = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase : List[str] = [*signature.parameters.keys()]
__lowerCamelCase : Tuple = ['pixel_values']
self.assertListEqual(arg_names[:1] , __a )
def snake_case_ ( self ):
__lowerCamelCase , __lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowerCamelCase : List[str] = self._prepare_for_class(__a , __a )
__lowerCamelCase : List[Any] = model_class(__a )
@jax.jit
def model_jitted(__a , **__a ):
return model(pixel_values=__a , **__a )
with self.subTest('JIT Enabled' ):
__lowerCamelCase : Union[str, Any] = model_jitted(**__a ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
__lowerCamelCase : str = model_jitted(**__a ).to_tuple()
self.assertEqual(len(__a ) , len(__a ) )
for jitted_output, output in zip(__a , __a ):
self.assertEqual(jitted_output.shape , output.shape )
def snake_case_ ( self ):
__lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def snake_case_ ( self ):
__lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a )
def snake_case_ ( self ):
__lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def snake_case_ ( self ):
for model_class_name in self.all_model_classes:
__lowerCamelCase : List[str] = model_class_name.from_pretrained('microsoft/beit-base-patch16-224' )
__lowerCamelCase : str = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(__a )
def UpperCAmelCase ( ) -> Any:
__lowerCamelCase : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@require_flax
class __lowercase( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case_ ( self ):
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def snake_case_ ( self ):
__lowerCamelCase : List[str] = FlaxBeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' )
__lowerCamelCase : int = self.default_image_processor
__lowerCamelCase : Tuple = prepare_img()
__lowerCamelCase : str = image_processor(images=__a , return_tensors='np' ).pixel_values
# prepare bool_masked_pos
__lowerCamelCase : int = np.ones((1, 196) , dtype=__a )
# forward pass
__lowerCamelCase : Any = model(pixel_values=__a , bool_masked_pos=__a )
__lowerCamelCase : List[Any] = outputs.logits
# verify the logits
__lowerCamelCase : List[Any] = (1, 196, 8192)
self.assertEqual(logits.shape , __a )
__lowerCamelCase : str = np.array(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , __a , atol=1E-2 ) )
@slow
def snake_case_ ( self ):
__lowerCamelCase : int = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' )
__lowerCamelCase : Union[str, Any] = self.default_image_processor
__lowerCamelCase : Dict = prepare_img()
__lowerCamelCase : Optional[int] = image_processor(images=__a , return_tensors='np' )
# forward pass
__lowerCamelCase : Dict = model(**__a )
__lowerCamelCase : int = outputs.logits
# verify the logits
__lowerCamelCase : Optional[int] = (1, 1000)
self.assertEqual(logits.shape , __a )
__lowerCamelCase : List[str] = np.array([-1.2_385, -1.0_987, -1.0_108] )
self.assertTrue(np.allclose(logits[0, :3] , __a , atol=1E-4 ) )
__lowerCamelCase : Tuple = 281
self.assertEqual(logits.argmax(-1 ).item() , __a )
@slow
def snake_case_ ( self ):
__lowerCamelCase : Union[str, Any] = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' )
__lowerCamelCase : Union[str, Any] = self.default_image_processor
__lowerCamelCase : Optional[int] = prepare_img()
__lowerCamelCase : Optional[int] = image_processor(images=__a , return_tensors='np' )
# forward pass
__lowerCamelCase : str = model(**__a )
__lowerCamelCase : List[str] = outputs.logits
# verify the logits
__lowerCamelCase : Union[str, Any] = (1, 21841)
self.assertEqual(logits.shape , __a )
__lowerCamelCase : Any = np.array([1.6_881, -0.2_787, 0.5_901] )
self.assertTrue(np.allclose(logits[0, :3] , __a , atol=1E-4 ) )
__lowerCamelCase : Tuple = 2396
self.assertEqual(logits.argmax(-1 ).item() , __a )
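
# Note (added): in the usual transformers checkout these tests would live under
# a path like tests/models/beit/test_modeling_flax_beit.py (path assumed) and
# run with pytest; the @slow integration tests above are skipped unless the
# RUN_SLOW=1 environment variable is set.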
"""simple docstring"""
import random
from typing import Any
def UpperCAmelCase ( A__: list ) -> list[Any]:
for _ in range(len(A__ ) ):
__lowerCamelCase : List[Any] = random.randint(0 , len(A__ ) - 1 )
__lowerCamelCase : Optional[Any] = random.randint(0 , len(A__ ) - 1 )
__lowerCamelCase , __lowerCamelCase : Any = data[b], data[a]
return data
if __name__ == "__main__":
a_ : Any = [0, 1, 2, 3, 4, 5, 6, 7]
a_ : int = ['''python''', '''says''', '''hello''', '''!''']
print('''Fisher-Yates Shuffle:''')
print('''List''', integers, strings)
print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
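
# Note (added): the loop above swaps two independently chosen random positions,
# which is a simplified variant. A sketch of the canonical Fisher-Yates walk,
# which swaps each position i with a uniformly random index j <= i and yields
# an unbiased permutation:
def canonical_fisher_yates_shuffle(data: list) -> list[Any]:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data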
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional

import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter

from transformers import HfArgumentParser


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )


def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False


class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])

    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")

            title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()


def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()


if __name__ == "__main__":
    main()
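
# For reference (an assumption inferred from the DictReader columns used in
# Plot.__init__, not from separate documentation): the input CSV is expected
# to look roughly like
#
#   model,batch_size,sequence_length,result
#   bert-base-cased,8,128,1422
#   bert-base-cased,8,512,2374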
"""Tests for the LXMERT tokenizers."""
import os
import unittest

from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neo"] = [
        "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoForCausalLM",
        "GPTNeoForQuestionAnswering",
        "GPTNeoForSequenceClassification",
        "GPTNeoForTokenClassification",
        "GPTNeoModel",
        "GPTNeoPreTrainedModel",
        "load_tf_weights_in_gpt_neo",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
        "FlaxGPTNeoForCausalLM",
        "FlaxGPTNeoModel",
        "FlaxGPTNeoPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neo import (
            GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoForCausalLM,
            GPTNeoForQuestionAnswering,
            GPTNeoForSequenceClassification,
            GPTNeoForTokenClassification,
            GPTNeoModel,
            GPTNeoPreTrainedModel,
            load_tf_weights_in_gpt_neo,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
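
# Note (added): with this pattern, `import transformers.models.gpt_neo` stays
# cheap; modeling_gpt_neo is only imported the first time an attribute such as
# GPTNeoModel is actually accessed, and the availability guards above keep the
# torch and flax backends optional.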
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
    "configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_maskformer"] = [
        "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MaskFormerForInstanceSegmentation",
        "MaskFormerModel",
        "MaskFormerPreTrainedModel",
    ]
    _import_structure["modeling_maskformer_swin"] = [
        "MaskFormerSwinBackbone",
        "MaskFormerSwinModel",
        "MaskFormerSwinPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
    from .configuration_maskformer_swin import MaskFormerSwinConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_maskformer import MaskFormerFeatureExtractor
        from .image_processing_maskformer import MaskFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_maskformer import (
            MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            MaskFormerForInstanceSegmentation,
            MaskFormerModel,
            MaskFormerPreTrainedModel,
        )
        from .modeling_maskformer_swin import (
            MaskFormerSwinBackbone,
            MaskFormerSwinModel,
            MaskFormerSwinPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
"""Tokenization class for model T5."""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer


if TYPE_CHECKING:
    from ...tokenization_utils_base import TextInput
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    }
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}

SPIECE_UNDERLINE = "▁"


class T5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        legacy=True,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        if legacy:
            logger.warning_once(
                f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after"
                " special tokens will not be properly handled. We recommend you to"
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565"
            )

        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy=legacy,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self._extra_ids = extra_ids

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
        # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
        # the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]

        tokens = self.sp_model.encode(text, out_type=str)

        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens

    def _convert_token_to_id(self, token):
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
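
# Illustrative usage (added; a sketch that assumes a local spiece.model file,
# not part of the original module): sentinel tokens occupy the top of the
# extended vocabulary, so <extra_id_0> maps to the highest id.
#
#   tok = T5Tokenizer("spiece.model")
#   assert tok._convert_token_to_id("<extra_id_0>") == tok.vocab_size - 1
#   assert tok._convert_id_to_token(tok.vocab_size - 1) == "<extra_id_0>"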
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify the inputs are compatible with apply_ocr
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
import flax.linen as nn
import jax
import jax.numpy as jnp


class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        # nearest-neighbor 2x upsampling, then a 3x3 convolution
        hidden_states = jax.image.resize(
            hidden_states, shape=(batch, height * 2, width * 2, channels), method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # stride-2 convolution halves the spatial resolution
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            # 1x1 convolution matches the residual's channel count to the output
            self.conv_shortcut = nn.Conv(
                out_channels, kernel_size=(1, 1), strides=(1, 1), padding="VALID", dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        # project the time embedding and add it as a per-channel bias
        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
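
# Illustrative usage (added; a sketch under assumed shapes, not part of the
# original module): Flax convolutions here take channels-last inputs, and the
# time-embedding width (128 below) is arbitrary since time_emb_proj projects
# it to out_channels.
if __name__ == "__main__":
    rng = jax.random.PRNGKey(0)
    block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
    x = jnp.ones((1, 8, 8, 32))  # (batch, height, width, channels)
    t = jnp.ones((1, 128))       # dummy time embedding
    params = block.init(rng, x, t)
    y = block.apply(params, x, t)
    print(y.shape)  # (1, 8, 8, 64)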
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}


class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep

        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]

        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
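
# Illustrative expectation (added; schematic, the token strings stand in for
# real vocabulary entries): for a sequence pair the special-token layout
# produced by the methods above is
#
#   tokens:          <s> A ... </s> B ... </s>
#   token_type_ids:   0  0 ...  0   1 ...  1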
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
snake_case = True
except ImportError:
snake_case = False
try:
from torch.hub import _get_torch_home
snake_case = _get_torch_home()
except ImportError:
snake_case = os.path.expanduser(
os.getenv('''TORCH_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''torch'''))
)
snake_case = os.path.join(torch_cache_home, '''transformers''')
snake_case = '''https://cdn.huggingface.co'''
snake_case = '''https://s3.amazonaws.com/models.huggingface.co/bert'''
snake_case = '''/'''.join(str(Path(__file__).resolve()).split('''/''')[:-1])
snake_case = os.path.join(PATH, '''config.yaml''')
snake_case = os.path.join(PATH, '''attributes.txt''')
snake_case = os.path.join(PATH, '''objects.txt''')
snake_case = os.getenv('''PYTORCH_PRETRAINED_BERT_CACHE''', default_cache_path)
snake_case = os.getenv('''PYTORCH_TRANSFORMERS_CACHE''', PYTORCH_PRETRAINED_BERT_CACHE)
snake_case = os.getenv('''TRANSFORMERS_CACHE''', PYTORCH_TRANSFORMERS_CACHE)
snake_case = '''pytorch_model.bin'''
snake_case = '''config.yaml'''
def snake_case ( lowerCAmelCase_=OBJECTS , lowerCAmelCase_=ATTRIBUTES ) -> Optional[int]:
_snake_case = []
with open(lowerCAmelCase_ ) as f:
for object in f.readlines():
vg_classes.append(object.split(''',''' )[0].lower().strip() )
_snake_case = []
with open(lowerCAmelCase_ ) as f:
for object in f.readlines():
vg_attrs.append(object.split(''',''' )[0].lower().strip() )
return vg_classes, vg_attrs
def snake_case ( lowerCAmelCase_ ) -> int:
_snake_case = OrderedDict()
with open(lowerCAmelCase_ , '''rb''' ) as f:
_snake_case = pkl.load(lowerCAmelCase_ )['''model''']
for k in copy.deepcopy(list(ckp.keys() ) ):
_snake_case = ckp.pop(lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , np.ndarray ):
_snake_case = torch.tensor(lowerCAmelCase_ )
else:
assert isinstance(lowerCAmelCase_ , torch.tensor ), type(lowerCAmelCase_ )
_snake_case = v
return r
class UpperCAmelCase :
A__ : int = {}
def __init__( self : Optional[int] , __lowerCamelCase : dict , __lowerCamelCase : str = "root" , __lowerCamelCase : Any=0 ):
"""simple docstring"""
_snake_case = name
_snake_case = level
_snake_case = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
_snake_case = copy.deepcopy(__lowerCamelCase )
_snake_case = copy.deepcopy(__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
_snake_case = Config(__lowerCamelCase , name=__lowerCamelCase , level=level + 1 )
_snake_case = v
setattr(self , __lowerCamelCase , __lowerCamelCase )
_snake_case = d
def __repr__( self : Optional[Any] ):
"""simple docstring"""
return str(list((self._pointer.keys()) ) )
def __setattr__( self : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] ):
"""simple docstring"""
_snake_case = val
_snake_case = val
_snake_case = key.split('''.''' )
_snake_case = len(__lowerCamelCase ) - 1
_snake_case = self._pointer
if len(__lowerCamelCase ) > 1:
for i, l in enumerate(__lowerCamelCase ):
if hasattr(self , __lowerCamelCase ) and isinstance(getattr(self , __lowerCamelCase ) , __lowerCamelCase ):
setattr(getattr(self , __lowerCamelCase ) , '''.'''.join(levels[i:] ) , __lowerCamelCase )
if l == last_level:
_snake_case = val
else:
_snake_case = pointer[l]
def __UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
return self._pointer
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : Any , __lowerCamelCase : int ):
"""simple docstring"""
with open(f"""{file_name}""" , '''w''' ) as stream:
dump(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : Optional[int] , __lowerCamelCase : Dict ):
"""simple docstring"""
with open(f"""{file_name}""" , '''w''' ) as stream:
json.dump(__lowerCamelCase , __lowerCamelCase )
@staticmethod
def __UpperCAmelCase ( __lowerCamelCase : List[str] ):
"""simple docstring"""
with open(__lowerCamelCase ) as stream:
_snake_case = load(__lowerCamelCase , Loader=__lowerCamelCase )
return data
def __str__( self : Tuple ):
"""simple docstring"""
_snake_case = ''' '''
if self._name != "root":
_snake_case = f"""{t * (self._level-1)}{self._name}:\n"""
else:
_snake_case = ''''''
_snake_case = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(__lowerCamelCase , __lowerCamelCase ):
r += f"""{t * (self._level)}{v}\n"""
self._level += 1
else:
r += f"""{t * (self._level)}{k}: {v} ({type(__lowerCamelCase ).__name__})\n"""
_snake_case = level
return r[:-1]
@classmethod
def __UpperCAmelCase ( cls : Any , __lowerCamelCase : str , **__lowerCamelCase : Optional[Any] ):
"""simple docstring"""
_snake_case , _snake_case = cls.get_config_dict(__lowerCamelCase , **__lowerCamelCase )
return cls(__lowerCamelCase )
@classmethod
def __UpperCAmelCase ( cls : Union[str, Any] , __lowerCamelCase : str , **__lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
_snake_case = kwargs.pop('''cache_dir''' , __lowerCamelCase )
_snake_case = kwargs.pop('''force_download''' , __lowerCamelCase )
_snake_case = kwargs.pop('''resume_download''' , __lowerCamelCase )
_snake_case = kwargs.pop('''proxies''' , __lowerCamelCase )
_snake_case = kwargs.pop('''local_files_only''' , __lowerCamelCase )
if os.path.isdir(__lowerCamelCase ):
_snake_case = os.path.join(__lowerCamelCase , __lowerCamelCase )
elif os.path.isfile(__lowerCamelCase ) or is_remote_url(__lowerCamelCase ):
_snake_case = pretrained_model_name_or_path
else:
_snake_case = hf_bucket_url(__lowerCamelCase , filename=__lowerCamelCase , use_cdn=__lowerCamelCase )
try:
# Load from URL or cache if already cached
_snake_case = cached_path(
__lowerCamelCase , cache_dir=__lowerCamelCase , force_download=__lowerCamelCase , proxies=__lowerCamelCase , resume_download=__lowerCamelCase , local_files_only=__lowerCamelCase , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
_snake_case = Config.load_yaml(__lowerCamelCase )
except EnvironmentError:
_snake_case = '''Can\'t load config for'''
raise EnvironmentError(__lowerCamelCase )
if resolved_config_file == config_file:
print('''loading configuration file from path''' )
else:
print('''loading configuration file cache''' )
return Config.load_yaml(__lowerCamelCase ), kwargs
def snake_case ( lowerCAmelCase_ ) -> Optional[int]:
_snake_case = torch.load('''dump.pt''' , map_location=in_tensor.device )
_snake_case = in_tensor.numpy()
_snake_case = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , rtol=0.01 , atol=0.1 ), (
f"""{sum([1 for x in np.isclose(lowerCAmelCase_ , lowerCAmelCase_ , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %"""
" element-wise mismatch"
)
raise Exception('''tensors are all good''' )
# Hugging face functions below
def snake_case ( lowerCAmelCase_ ) -> str:
_snake_case = urlparse(lowerCAmelCase_ )
return parsed.scheme in ("http", "https")
def snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True ) -> str:
_snake_case = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
_snake_case = '''/''' not in model_id
if legacy_format:
return f"""{endpoint}/{model_id}-{filename}"""
else:
return f"""{endpoint}/{model_id}/{filename}"""
def snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=0 , lowerCAmelCase_=None , ) -> Optional[Any]:
_snake_case = '''python/{}'''.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
ua += "; " + "; ".join('''{}/{}'''.format(lowerCAmelCase_ , lowerCAmelCase_ ) for k, v in user_agent.items() )
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
ua += "; " + user_agent
_snake_case = {'''user-agent''': ua}
if resume_size > 0:
_snake_case = '''bytes=%d-''' % (resume_size,)
_snake_case = requests.get(lowerCAmelCase_ , stream=lowerCAmelCase_ , proxies=lowerCAmelCase_ , headers=lowerCAmelCase_ )
if response.status_code == 416: # Range not satisfiable
return
_snake_case = response.headers.get('''Content-Length''' )
_snake_case = resume_size + int(lowerCAmelCase_ ) if content_length is not None else None
_snake_case = tqdm(
unit='''B''' , unit_scale=lowerCAmelCase_ , total=lowerCAmelCase_ , initial=lowerCAmelCase_ , desc='''Downloading''' , )
for chunk in response.iter_content(chunk_size=1024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(lowerCAmelCase_ ) )
temp_file.write(lowerCAmelCase_ )
progress.close()
def snake_case ( lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=False , lowerCAmelCase_=None , lowerCAmelCase_=10 , lowerCAmelCase_=False , lowerCAmelCase_=None , lowerCAmelCase_=False , ) -> Dict:
if cache_dir is None:
_snake_case = TRANSFORMERS_CACHE
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = str(lowerCAmelCase_ )
os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
_snake_case = None
if not local_files_only:
try:
_snake_case = requests.head(lowerCAmelCase_ , allow_redirects=lowerCAmelCase_ , proxies=lowerCAmelCase_ , timeout=lowerCAmelCase_ )
if response.status_code == 200:
_snake_case = response.headers.get('''ETag''' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
_snake_case = url_to_filename(lowerCAmelCase_ , lowerCAmelCase_ )
# get cache path to put the file
_snake_case = os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(lowerCAmelCase_ ):
return cache_path
else:
_snake_case = [
file
for file in fnmatch.filter(os.listdir(lowerCAmelCase_ ) , filename + '''.*''' )
if not file.endswith('''.json''' ) and not file.endswith('''.lock''' )
]
if len(lowerCAmelCase_ ) > 0:
return os.path.join(lowerCAmelCase_ , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'''Cannot find the requested files in the cached path and outgoing traffic has been'''
''' disabled. To enable model look-ups and downloads online, set \'local_files_only\''''
''' to False.''' )
return None
# From now on, etag is not None.
if os.path.exists(lowerCAmelCase_ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
_snake_case = cache_path + '''.lock'''
with FileLock(lowerCAmelCase_ ):
# If the download just completed while the lock was activated.
if os.path.exists(lowerCAmelCase_ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
_snake_case = cache_path + '''.incomplete'''
@contextmanager
def _resumable_file_manager():
with open(lowerCAmelCase_ , '''a+b''' ) as f:
yield f
_snake_case = _resumable_file_manager
if os.path.exists(lowerCAmelCase_ ):
_snake_case = os.stat(lowerCAmelCase_ ).st_size
else:
_snake_case = 0
else:
_snake_case = partial(tempfile.NamedTemporaryFile , dir=lowerCAmelCase_ , delete=lowerCAmelCase_ )
_snake_case = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
'''%s not found in cache or force_download set to True, downloading to %s''' , lowerCAmelCase_ , temp_file.name , )
http_get(
lowerCAmelCase_ , lowerCAmelCase_ , proxies=lowerCAmelCase_ , resume_size=lowerCAmelCase_ , user_agent=lowerCAmelCase_ , )
os.replace(temp_file.name , lowerCAmelCase_ )
_snake_case = {'''url''': url, '''etag''': etag}
_snake_case = cache_path + '''.json'''
with open(lowerCAmelCase_ , '''w''' ) as meta_file:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
return cache_path
def snake_case ( lowerCAmelCase_ , lowerCAmelCase_=None ) -> int:
_snake_case = url.encode('''utf-8''' )
_snake_case = shaaaa(lowerCAmelCase_ )
_snake_case = url_hash.hexdigest()
if etag:
_snake_case = etag.encode('''utf-8''' )
_snake_case = shaaaa(lowerCAmelCase_ )
filename += "." + etag_hash.hexdigest()
if url.endswith('''.h5''' ):
filename += ".h5"
return filename
def snake_case ( lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=False , lowerCAmelCase_=None , lowerCAmelCase_=False , lowerCAmelCase_=None , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False , ) -> Optional[Any]:
if cache_dir is None:
_snake_case = TRANSFORMERS_CACHE
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = str(lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = str(lowerCAmelCase_ )
if is_remote_url(lowerCAmelCase_ ):
# URL, so get it from the cache (downloading if necessary)
_snake_case = get_from_cache(
lowerCAmelCase_ , cache_dir=lowerCAmelCase_ , force_download=lowerCAmelCase_ , proxies=lowerCAmelCase_ , resume_download=lowerCAmelCase_ , user_agent=lowerCAmelCase_ , local_files_only=lowerCAmelCase_ , )
elif os.path.exists(lowerCAmelCase_ ):
# File, and it exists.
_snake_case = url_or_filename
elif urlparse(lowerCAmelCase_ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('''file {} not found'''.format(lowerCAmelCase_ ) )
else:
# Something unknown
raise ValueError('''unable to parse {} as a URL or as a local path'''.format(lowerCAmelCase_ ) )
if extract_compressed_file:
if not is_zipfile(lowerCAmelCase_ ) and not tarfile.is_tarfile(lowerCAmelCase_ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
_snake_case , _snake_case = os.path.split(lowerCAmelCase_ )
_snake_case = output_file.replace('''.''' , '''-''' ) + '''-extracted'''
_snake_case = os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
if os.path.isdir(lowerCAmelCase_ ) and os.listdir(lowerCAmelCase_ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
_snake_case = output_path + '''.lock'''
with FileLock(lowerCAmelCase_ ):
shutil.rmtree(lowerCAmelCase_ , ignore_errors=lowerCAmelCase_ )
os.makedirs(lowerCAmelCase_ )
if is_zipfile(lowerCAmelCase_ ):
with ZipFile(lowerCAmelCase_ , '''r''' ) as zip_file:
zip_file.extractall(lowerCAmelCase_ )
zip_file.close()
elif tarfile.is_tarfile(lowerCAmelCase_ ):
_snake_case = tarfile.open(lowerCAmelCase_ )
tar_file.extractall(lowerCAmelCase_ )
tar_file.close()
else:
raise EnvironmentError('''Archive format of {} could not be identified'''.format(lowerCAmelCase_ ) )
return output_path_extracted
return output_path
def snake_case ( lowerCAmelCase_ , lowerCAmelCase_="," ) -> Tuple:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
if os.path.isfile(lowerCAmelCase_ ):
with open(lowerCAmelCase_ ) as f:
_snake_case = eval(f.read() )
else:
_snake_case = requests.get(lowerCAmelCase_ )
try:
            _snake_case = req.json()
except Exception:
_snake_case = req.content.decode()
assert data is not None, "could not connect"
try:
_snake_case = eval(lowerCAmelCase_ )
except Exception:
_snake_case = data.split('''\n''' )
req.close()
return data
def snake_case ( lowerCAmelCase_ ) -> List[str]:
_snake_case = requests.get(lowerCAmelCase_ )
_snake_case = np.array(Image.open(BytesIO(response.content ) ) )
return img
def snake_case ( lowerCAmelCase_ ) -> List[str]:
_snake_case = url.split('''/''' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(lowerCAmelCase_ )
with open(lowerCAmelCase_ , '''rb''' ) as stream:
_snake_case = pkl.load(lowerCAmelCase_ )
_snake_case = weights.pop('''model''' )
_snake_case = {}
for k, v in model.items():
_snake_case = torch.from_numpy(lowerCAmelCase_ )
if "running_var" in k:
_snake_case = torch.tensor([0] )
_snake_case = k.replace('''running_var''' , '''num_batches_tracked''' )
_snake_case = zero
return new
def snake_case ( ) -> str:
print(f"""{os.path.abspath(os.path.join(lowerCAmelCase_ , os.pardir ) )}/demo.ipynb""" )
def snake_case ( lowerCAmelCase_ , lowerCAmelCase_="RGB" ) -> Any:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
if os.path.isfile(lowerCAmelCase_ ):
_snake_case = cva.imread(lowerCAmelCase_ )
else:
_snake_case = get_image_from_url(lowerCAmelCase_ )
assert img is not None, f"""could not connect to: {im}"""
_snake_case = cva.cvtColor(lowerCAmelCase_ , cva.COLOR_BGR2RGB )
if input_format == "RGB":
_snake_case = img[:, :, ::-1]
return img
def snake_case ( lowerCAmelCase_ , lowerCAmelCase_=1 ) -> Tuple:
return (images[i : i + batch] for i in range(0 , len(lowerCAmelCase_ ) , lowerCAmelCase_ ))
| 103 |
"""simple docstring"""
from math import sqrt
def snake_case ( lowerCAmelCase_ = 1000000 ) -> int:
_snake_case = 0
_snake_case = 0
_snake_case = 42
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(lowerCAmelCase_ , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(F"{solution() = }")
| 103 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
__snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
__snake_case = '''
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
>>> pipe.to("cuda")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save("cat.png")
```
'''
def A_ ( _lowerCAmelCase : Dict, _lowerCAmelCase : Dict, _lowerCAmelCase : Dict=8 ):
"""simple docstring"""
_a = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
_a = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
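# Standalone restatement (assumed equivalent) of the rounding above, handy
# for sanity-checking latent sizes: any side not divisible by
# scale_factor ** 2 is rounded up to the next multiple of scale_factor.
def _latent_side_demo(side , scale_factor=8 ):
    q , r = divmod(side , scale_factor**2 )
    return (q + (1 if r else 0)) * scale_factor
assert _latent_side_demo(768 ) == 96 # 768 is divisible by 64
assert _latent_side_demo(770 ) == 104 # rounds up: (12 + 1) * 8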
class __lowerCamelCase ( a__ ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Optional[Any]:
super().__init__()
self.register_modules(
unet=__UpperCAmelCase , scheduler=__UpperCAmelCase , movq=__UpperCAmelCase , )
_a = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
if latents is None:
_a = randn_tensor(__UpperCAmelCase , generator=__UpperCAmelCase , device=__UpperCAmelCase , dtype=__UpperCAmelCase )
else:
if latents.shape != shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' )
_a = latents.to(__UpperCAmelCase )
_a = latents * scheduler.init_noise_sigma
return latents
def _UpperCAmelCase ( self , __UpperCAmelCase=0 ) -> str:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
_a = torch.device(F'cuda:{gpu_id}' )
_a = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__UpperCAmelCase , __UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase=0 ) -> Dict:
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
_a = torch.device(F'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=__UpperCAmelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_a = None
for cpu_offloaded_model in [self.unet, self.movq]:
_a , _a = cpu_offload_with_hook(__UpperCAmelCase , __UpperCAmelCase , prev_module_hook=__UpperCAmelCase )
# We'll offload the last model manually.
_a = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _UpperCAmelCase ( self ) -> int:
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(__UpperCAmelCase , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(__UpperCAmelCase )
def __call__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 512 , __UpperCAmelCase = 512 , __UpperCAmelCase = 100 , __UpperCAmelCase = 4.0 , __UpperCAmelCase = 1 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = "pil" , __UpperCAmelCase = True , ) -> str:
_a = self._execution_device
_a = guidance_scale > 1.0
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
_a = torch.cat(__UpperCAmelCase , dim=0 )
_a = image_embeds.shape[0] * num_images_per_prompt
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
_a = torch.cat(__UpperCAmelCase , dim=0 )
if do_classifier_free_guidance:
_a = image_embeds.repeat_interleave(__UpperCAmelCase , dim=0 )
_a = negative_image_embeds.repeat_interleave(__UpperCAmelCase , dim=0 )
_a = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=__UpperCAmelCase )
self.scheduler.set_timesteps(__UpperCAmelCase , device=__UpperCAmelCase )
_a = self.scheduler.timesteps
_a = self.unet.config.in_channels
_a , _a = downscale_height_and_width(__UpperCAmelCase , __UpperCAmelCase , self.movq_scale_factor )
# create initial latent
_a = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , self.scheduler , )
for i, t in enumerate(self.progress_bar(__UpperCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
_a = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_a = {'''image_embeds''': image_embeds}
_a = self.unet(
sample=__UpperCAmelCase , timestep=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , added_cond_kwargs=__UpperCAmelCase , return_dict=__UpperCAmelCase , )[0]
if do_classifier_free_guidance:
_a , _a = noise_pred.split(latents.shape[1] , dim=1 )
_a , _a = noise_pred.chunk(2 )
_a , _a = variance_pred.chunk(2 )
_a = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_a = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , '''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_a , _a = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_a = self.scheduler.step(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase , )[0]
# post-processing
_a = self.movq.decode(__UpperCAmelCase , force_not_quantize=__UpperCAmelCase )['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
_a = image * 0.5 + 0.5
_a = image.clamp(0 , 1 )
_a = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_a = self.numpy_to_pil(__UpperCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__UpperCAmelCase )
| 285 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {'''vocab_file''': '''spiece.model'''}
__snake_case = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
}
}
__snake_case = {
'''albert-base-v1''': 512,
'''albert-large-v1''': 512,
'''albert-xlarge-v1''': 512,
'''albert-xxlarge-v1''': 512,
'''albert-base-v2''': 512,
'''albert-large-v2''': 512,
'''albert-xlarge-v2''': 512,
'''albert-xxlarge-v2''': 512,
}
__snake_case = '''▁'''
class __lowerCamelCase ( a__ ):
'''simple docstring'''
A_ : Optional[int] = VOCAB_FILES_NAMES
A_ : int = PRETRAINED_VOCAB_FILES_MAP
A_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[MASK]" , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
_a = (
AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase , normalized=__UpperCAmelCase )
if isinstance(__UpperCAmelCase , __UpperCAmelCase )
else mask_token
)
_a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
_a = do_lower_case
_a = remove_space
_a = keep_accents
_a = vocab_file
_a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__UpperCAmelCase )
@property
def _UpperCAmelCase ( self ) -> str:
return len(self.sp_model )
def _UpperCAmelCase ( self ) -> Optional[Any]:
_a = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> int:
_a = self.__dict__.copy()
_a = None
return state
def __setstate__( self , __UpperCAmelCase ) -> Tuple:
_a = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_a = {}
_a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> List[str]:
if self.remove_space:
_a = ''' '''.join(inputs.strip().split() )
else:
_a = inputs
_a = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
if not self.keep_accents:
_a = unicodedata.normalize('''NFKD''' , __UpperCAmelCase )
_a = ''''''.join([c for c in outputs if not unicodedata.combining(__UpperCAmelCase )] )
if self.do_lower_case:
_a = outputs.lower()
return outputs
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> List[str]:
_a = self.preprocess_text(__UpperCAmelCase )
_a = self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
_a = []
for piece in pieces:
if len(__UpperCAmelCase ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
_a = self.sp_model.EncodeAsPieces(piece[:-1].replace(__UpperCAmelCase , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_a = cur_pieces[1:]
else:
_a = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__UpperCAmelCase )
else:
new_pieces.append(__UpperCAmelCase )
return new_pieces
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> str:
return self.sp_model.PieceToId(__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> int:
return self.sp_model.IdToPiece(__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Optional[Any]:
_a = []
_a = ''''''
_a = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__UpperCAmelCase ) + token
_a = True
_a = []
else:
current_sub_tokens.append(__UpperCAmelCase )
_a = False
out_string += self.sp_model.decode(__UpperCAmelCase )
return out_string.strip()
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
_a = [self.sep_token_id]
_a = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is not None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1]
return [1] + ([0] * len(__UpperCAmelCase )) + [1]
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
_a = [self.sep_token_id]
_a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(__UpperCAmelCase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
_a = os.path.join(
__UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , '''wb''' ) as fi:
_a = self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (out_vocab_file,)
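# Hypothetical round-trip with a checkpoint from the map above; assumes the
# upstream export name AlbertTokenizer, an installed sentencepiece, and
# network access. Shown commented out so the module imports unchanged.
# from transformers import AlbertTokenizer
# tok = AlbertTokenizer.from_pretrained("albert-base-v2")
# ids = tok("SentencePiece handles subwords.")["input_ids"]
# print(tok.convert_ids_to_tokens(ids))  # wrapped as [CLS] ... [SEP], matching the cls/sep logic above
# print(tok.decode(ids, skip_special_tokens=True))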
| 285 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = '▁'
_lowercase = {'vocab_file': 'spiece.model'}
_lowercase = {
'vocab_file': {
'google/reformer-crime-and-punishment': (
'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'
)
}
}
_lowercase = {
'google/reformer-crime-and-punishment': 524_288,
}
class lowerCamelCase__ ( A__ ):
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = ["""input_ids""", """attention_mask"""]
def __init__( self : List[Any] , __a : Optional[Any] , __a : Optional[Any]="</s>" , __a : Optional[int]="<unk>" , __a : Dict=[] , __a : Optional[Dict[str, Any]] = None , **__a : Dict , ):
'''simple docstring'''
lowerCamelCase__: Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=__a , unk_token=__a , additional_special_tokens=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , )
lowerCamelCase__: List[str] = vocab_file
lowerCamelCase__: Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__a )
@property
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
return self.sp_model.get_piece_size()
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowerCamelCase__: Optional[int] = {self.convert_ids_to_tokens(__a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[str] ):
'''simple docstring'''
lowerCamelCase__: Any = self.__dict__.copy()
lowerCamelCase__: Optional[Any] = None
return state
def __setstate__( self : int , __a : List[Any] ):
'''simple docstring'''
lowerCamelCase__: Optional[int] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowerCamelCase__: int = {}
lowerCamelCase__: int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCamelCase_ ( self : Dict , __a : str ):
'''simple docstring'''
return self.sp_model.encode(__a , out_type=__a )
def lowerCamelCase_ ( self : Any , __a : List[Any] ):
'''simple docstring'''
return self.sp_model.piece_to_id(__a )
def lowerCamelCase_ ( self : Union[str, Any] , __a : Tuple ):
'''simple docstring'''
if index < self.sp_model.get_piece_size():
lowerCamelCase__: Optional[Any] = self.sp_model.IdToPiece(__a )
return token
def lowerCamelCase_ ( self : Dict , __a : Any ):
'''simple docstring'''
lowerCamelCase__: List[Any] = []
lowerCamelCase__: str = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__a ) + token
lowerCamelCase__: List[str] = []
else:
current_sub_tokens.append(__a )
out_string += self.sp_model.decode(__a )
return out_string.strip()
def lowerCamelCase_ ( self : Optional[Any] , __a : str , __a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(__a ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCamelCase__: int = os.path.join(
__a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __a )
elif not os.path.isfile(self.vocab_file ):
with open(__a , """wb""" ) as fi:
lowerCamelCase__: Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(__a )
return (out_vocab_file,)
| 306 |
from __future__ import annotations
_lowercase = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
_lowercase = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def __lowerCAmelCase ( _UpperCamelCase ) -> list[float]:
'''simple docstring'''
lowerCamelCase__: str = []
lowerCamelCase__: List[str] = len(_UpperCamelCase )
for i in range(_UpperCamelCase ):
lowerCamelCase__: float = -1
for j in range(i + 1 , _UpperCamelCase ):
if arr[i] < arr[j]:
lowerCamelCase__: Dict = arr[j]
break
result.append(_UpperCamelCase )
return result
def __lowerCAmelCase ( _UpperCamelCase ) -> list[float]:
'''simple docstring'''
lowerCamelCase__: Tuple = []
for i, outer in enumerate(_UpperCamelCase ):
lowerCamelCase__: float = -1
for inner in arr[i + 1 :]:
if outer < inner:
lowerCamelCase__: Dict = inner
break
result.append(_UpperCamelCase )
return result
def __lowerCAmelCase ( _UpperCamelCase ) -> list[float]:
'''simple docstring'''
lowerCamelCase__: Optional[Any] = len(_UpperCamelCase )
lowerCamelCase__: list[float] = []
lowerCamelCase__: list[float] = [-1] * arr_size
for index in reversed(range(_UpperCamelCase ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
lowerCamelCase__: Dict = stack[-1]
stack.append(arr[index] )
return result
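# Worked example (function names as in the pre-mangled source): for the
# input [2, 1, 3], every variant should return [3, 3, -1], i.e. each
# element's nearest greater neighbour to the right, or -1 when none exists.
# assert next_greatest_element_slow([2, 1, 3] ) == [3, 3, -1]
# assert next_greatest_element_fast([2, 1, 3] ) == [3, 3, -1]
# assert next_greatest_element([2, 1, 3] ) == [3, 3, -1]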
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
_lowercase = (
'from __main__ import arr, next_greatest_element_slow, '
'next_greatest_element_fast, next_greatest_element'
)
print(
'next_greatest_element_slow():',
timeit('next_greatest_element_slow(arr)', setup=setup),
)
print(
'next_greatest_element_fast():',
timeit('next_greatest_element_fast(arr)', setup=setup),
)
print(
' next_greatest_element():',
timeit('next_greatest_element(arr)', setup=setup),
)
| 306 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE_ = {
'''configuration_perceiver''': ['''PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PerceiverConfig''', '''PerceiverOnnxConfig'''],
'''tokenization_perceiver''': ['''PerceiverTokenizer'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ['''PerceiverFeatureExtractor''']
SCREAMING_SNAKE_CASE_ = ['''PerceiverImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'''PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PerceiverForImageClassificationConvProcessing''',
'''PerceiverForImageClassificationFourier''',
'''PerceiverForImageClassificationLearned''',
'''PerceiverForMaskedLM''',
'''PerceiverForMultimodalAutoencoding''',
'''PerceiverForOpticalFlow''',
'''PerceiverForSequenceClassification''',
'''PerceiverLayer''',
'''PerceiverModel''',
'''PerceiverPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
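# For context: _LazyModule defers the heavy torch/vision imports above until
# an attribute is first accessed. A minimal runnable sketch of the same idea
# (illustrative only; not how the real _LazyModule is implemented):
import importlib
class _LazyAttrDemo:
    def __init__(self , module_name , attr ):
        self._module_name , self._attr , self._obj = module_name , attr , None
    def resolve(self ):
        if self._obj is None: # the import happens here, on first use
            self._obj = getattr(importlib.import_module(self._module_name ) , self._attr )
        return self._obj
assert _LazyAttrDemo('''math''' , '''sqrt''' ).resolve()(16 ) == 4.0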
| 704 |
"""simple docstring"""
import datasets
SCREAMING_SNAKE_CASE_ = '''\
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}
'''
SCREAMING_SNAKE_CASE_ = '''\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some of them low-resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
'''
SCREAMING_SNAKE_CASE_ = '''
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
\'accuracy\': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric("xnli")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
'''
def A__ ( A__ , A__ ) -> Tuple:
'''simple docstring'''
return (preds == labels).mean()
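# Note: the comparison above needs NumPy arrays for .mean() to exist; the
# metric declares format="numpy" below, which guarantees that. Example:
# >>> import numpy as np
# >>> (np.array([0, 1, 1] ) == np.array([0, 1, 0] )).mean()
# 0.6666666666666666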
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
"""simple docstring"""
def __A ( self ) -> List[str]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
"references": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" , )
def __A ( self , snake_case_ , snake_case_ ) -> Dict:
return {"accuracy": simple_accuracy(snake_case_ , snake_case_ )}
| 579 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
snake_case = logging.get_logger(__name__)
snake_case = {
"""openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}
# fmt: off
snake_case = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_7, 3_6_6, 4_3_8, 5_3_2, 6_8_5,
7_0_5, 7_9_6, 9_3_0, 1_0_5_8, 1_2_2_0, 1_2_6_7, 1_2_7_9, 1_3_0_3, 1_3_4_3, 1_3_7_7,
1_3_9_1, 1_6_3_5, 1_7_8_2, 1_8_7_5, 2_1_6_2, 2_3_6_1, 2_4_8_8, 3_4_6_7, 4_0_0_8, 4_2_1_1,
4_6_0_0, 4_8_0_8, 5_2_9_9, 5_8_5_5, 6_3_2_9, 7_2_0_3, 9_6_0_9, 9_9_5_9, 1_0_5_6_3, 1_0_7_8_6,
1_1_4_2_0, 1_1_7_0_9, 1_1_9_0_7, 1_3_1_6_3, 1_3_6_9_7, 1_3_7_0_0, 1_4_8_0_8, 1_5_3_0_6, 1_6_4_1_0, 1_6_7_9_1,
1_7_9_9_2, 1_9_2_0_3, 1_9_5_1_0, 2_0_7_2_4, 2_2_3_0_5, 2_2_9_3_5, 2_7_0_0_7, 3_0_1_0_9, 3_0_4_2_0, 3_3_4_0_9,
3_4_9_4_9, 4_0_2_8_3, 4_0_4_9_3, 4_0_5_4_9, 4_7_2_8_2, 4_9_1_4_6, 5_0_2_5_7, 5_0_3_5_9, 5_0_3_6_0, 5_0_3_6_1
]
snake_case = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_9, 5_0_3, 5_2_2, 5_4_2, 8_7_3,
8_9_3, 9_0_2, 9_1_8, 9_2_2, 9_3_1, 1_3_5_0, 1_8_5_3, 1_9_8_2, 2_4_6_0, 2_6_2_7,
3_2_4_6, 3_2_5_3, 3_2_6_8, 3_5_3_6, 3_8_4_6, 3_9_6_1, 4_1_8_3, 4_6_6_7, 6_5_8_5, 6_6_4_7,
7_2_7_3, 9_0_6_1, 9_3_8_3, 1_0_4_2_8, 1_0_9_2_9, 1_1_9_3_8, 1_2_0_3_3, 1_2_3_3_1, 1_2_5_6_2, 1_3_7_9_3,
1_4_1_5_7, 1_4_6_3_5, 1_5_2_6_5, 1_5_6_1_8, 1_6_5_5_3, 1_6_6_0_4, 1_8_3_6_2, 1_8_9_5_6, 2_0_0_7_5, 2_1_6_7_5,
2_2_5_2_0, 2_6_1_3_0, 2_6_1_6_1, 2_6_4_3_5, 2_8_2_7_9, 2_9_4_6_4, 3_1_6_5_0, 3_2_3_0_2, 3_2_4_7_0, 3_6_8_6_5,
4_2_8_6_3, 4_7_4_2_5, 4_9_8_7_0, 5_0_2_5_4, 5_0_2_5_8, 5_0_3_6_0, 5_0_3_6_1, 5_0_3_6_2
]
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = '''whisper'''
SCREAMING_SNAKE_CASE_ : str = ['''past_key_values''']
SCREAMING_SNAKE_CASE_ : Any = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : int ,__A : Dict=5_1865 ,__A : List[str]=80 ,__A : str=6 ,__A : List[str]=4 ,__A : int=6 ,__A : Optional[Any]=4 ,__A : int=1536 ,__A : List[str]=1536 ,__A : Optional[Any]=0.0 ,__A : Union[str, Any]=0.0 ,__A : int=5_0257 ,__A : Dict=True ,__A : int=True ,__A : int="gelu" ,__A : Union[str, Any]=256 ,__A : Optional[int]=0.0 ,__A : Any=0.0 ,__A : Optional[int]=0.0 ,__A : List[Any]=0.02 ,__A : Optional[int]=False ,__A : int=1500 ,__A : Union[str, Any]=448 ,__A : Optional[Any]=5_0256 ,__A : List[str]=5_0256 ,__A : Dict=5_0256 ,__A : int=None ,__A : Optional[Any]=[220, 5_0256] ,__A : Optional[Any]=False ,__A : Dict=256 ,__A : Tuple=False ,__A : Union[str, Any]=0.05 ,__A : Optional[Any]=10 ,__A : int=2 ,__A : Optional[int]=0.0 ,__A : Optional[int]=10 ,__A : Any=0 ,__A : Optional[int]=7 ,**__A : str ,) -> List[Any]:
_lowercase = vocab_size
_lowercase = num_mel_bins
_lowercase = d_model
_lowercase = encoder_layers
_lowercase = encoder_attention_heads
_lowercase = decoder_layers
_lowercase = decoder_attention_heads
_lowercase = decoder_ffn_dim
_lowercase = encoder_ffn_dim
_lowercase = dropout
_lowercase = attention_dropout
_lowercase = activation_dropout
_lowercase = activation_function
_lowercase = init_std
_lowercase = encoder_layerdrop
_lowercase = decoder_layerdrop
_lowercase = use_cache
_lowercase = encoder_layers
_lowercase = scale_embedding # scale factor will be sqrt(d_model) if True
_lowercase = max_source_positions
_lowercase = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
_lowercase = classifier_proj_size
_lowercase = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowercase = apply_spec_augment
_lowercase = mask_time_prob
_lowercase = mask_time_length
_lowercase = mask_time_min_masks
_lowercase = mask_feature_prob
_lowercase = mask_feature_length
_lowercase = mask_feature_min_masks
_lowercase = median_filter_width
super().__init__(
pad_token_id=__A ,bos_token_id=__A ,eos_token_id=__A ,is_encoder_decoder=__A ,decoder_start_token_id=__A ,suppress_tokens=__A ,begin_suppress_tokens=__A ,**__A ,)
class A_ ( UpperCAmelCase ):
"""simple docstring"""
@property
def __UpperCAmelCase ( self : int ) -> Mapping[str, Mapping[int, str]]:
_lowercase = OrderedDict(
[
('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
] )
if self.use_past:
_lowercase = {0: 'batch'}
else:
_lowercase = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(__A ,direction='inputs' )
return common_inputs
def __UpperCAmelCase ( self : Tuple ,__A : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional["TensorType"] = None ,__A : int = 2_2050 ,__A : float = 5.0 ,__A : int = 220 ,) -> Mapping[str, Any]:
_lowercase = OrderedDict()
_lowercase = OnnxConfig.generate_dummy_inputs(
self ,preprocessor=preprocessor.feature_extractor ,batch_size=__A ,framework=__A ,sampling_rate=__A ,time_duration=__A ,frequency=__A ,)
_lowercase = encoder_inputs['input_features'].shape[2]
_lowercase = encoder_sequence_length // 2 if self.use_past else seq_length
_lowercase = super().generate_dummy_inputs(
preprocessor.tokenizer ,__A ,__A ,__A ,__A )
_lowercase = encoder_inputs.pop('input_features' )
_lowercase = decoder_inputs.pop('decoder_input_ids' )
if "past_key_values" in decoder_inputs:
_lowercase = decoder_inputs.pop('past_key_values' )
return dummy_inputs
@property
def __UpperCAmelCase ( self : Union[str, Any] ) -> float:
return 1e-3
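# Hypothetical usage of the config above (assumes the upstream export name
# WhisperConfig and that the attribute_map aliases behave as declared):
# from transformers import WhisperConfig
# cfg = WhisperConfig(encoder_layers=4 , decoder_layers=4 , d_model=256 )
# print(cfg.model_type )            # "whisper"
# print(cfg.num_attention_heads )   # aliased to encoder_attention_heads via attribute_map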
| 67 |
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _UpperCamelCase ( _A , _A , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : str = IFInpaintingSuperResolutionPipeline
__UpperCamelCase : str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
__UpperCamelCase : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"""original_image"""} )
__UpperCamelCase : Dict = PipelineTesterMixin.required_optional_params - {"""latents"""}
def lowerCAmelCase__ ( self : Union[str, Any] ):
return self._get_superresolution_dummy_components()
def lowerCAmelCase__ ( self : Dict , snake_case_ : Optional[int] , snake_case_ : str=0 ):
if str(snake_case_ ).startswith("""mps""" ):
UpperCamelCase_: Union[str, Any] = torch.manual_seed(snake_case_ )
else:
UpperCamelCase_: Tuple = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
UpperCamelCase_: Dict = floats_tensor((1, 3, 16, 16) , rng=random.Random(snake_case_ ) ).to(snake_case_ )
UpperCamelCase_: int = floats_tensor((1, 3, 32, 32) , rng=random.Random(snake_case_ ) ).to(snake_case_ )
UpperCamelCase_: str = floats_tensor((1, 3, 32, 32) , rng=random.Random(snake_case_ ) ).to(snake_case_ )
UpperCamelCase_: Dict = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowerCAmelCase__ ( self : Tuple ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def lowerCAmelCase__ ( self : Union[str, Any] ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def lowerCAmelCase__ ( self : List[Any] ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def lowerCAmelCase__ ( self : str ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def lowerCAmelCase__ ( self : str ):
self._test_save_load_local()
def lowerCAmelCase__ ( self : Union[str, Any] ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 548 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a_ :Optional[Any] = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :Union[str, Any] = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :Optional[Any] = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
a_ :List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 250 |
def a ( A__ = 6_0_0_8_5_1_4_7_5_1_4_3 ) -> int:
'''simple docstring'''
try:
SCREAMING_SNAKE_CASE__ : str = int(A__ )
except (TypeError, ValueError):
raise TypeError('''Parameter n must be int or castable to int.''' )
if n <= 0:
raise ValueError('''Parameter n must be greater than or equal to one.''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = 2
SCREAMING_SNAKE_CASE__ : Any = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
SCREAMING_SNAKE_CASE__ : Optional[int] = i
while n % i == 0:
SCREAMING_SNAKE_CASE__ : List[str] = n // i
i += 1
    return int(ans )
if __name__ == "__main__":
print(F'''{solution() = }''')
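# Worked check (function name as in the pre-mangled source): 13195 =
# 5 * 7 * 13 * 29, so solution(13195) should return 29; the default input
# is the Project Euler 3 value, whose well-known answer is 6857.
# assert solution(13195 ) == 29
# assert solution() == 6857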
| 250 | 1 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class a_ :
def __init__( self , UpperCAmelCase = 6 ):
a_ = None
a_ = None
self.create_linked_list(UpperCAmelCase )
def lowerCAmelCase__ ( self , UpperCAmelCase ):
a_ = Node()
a_ = current_node
a_ = current_node
a_ = current_node
for _ in range(1 , UpperCAmelCase ):
a_ = Node()
a_ = current_node
a_ = previous_node
a_ = current_node
a_ = self.front
a_ = previous_node
def lowerCAmelCase__ ( self ):
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def lowerCAmelCase__ ( self ):
self.check_can_perform_operation()
return self.front.data if self.front else None
def lowerCAmelCase__ ( self , UpperCAmelCase ):
if self.rear is None:
return
self.check_is_full()
if not self.is_empty():
a_ = self.rear.next
if self.rear:
a_ = data
def lowerCAmelCase__ ( self ):
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
a_ = self.front.data
a_ = None
return data
a_ = self.front
a_ = old_front.next
a_ = old_front.data
a_ = None
return data
def lowerCAmelCase__ ( self ):
if self.is_empty():
raise Exception("""Empty Queue""" )
def lowerCAmelCase__ ( self ):
if self.rear and self.rear.next == self.front:
raise Exception("""Full Queue""" )
class a_ :
def __init__( self ):
a_ = None
a_ = None
a_ = None
if __name__ == "__main__":
import doctest
doctest.testmod()
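# Usage sketch; CircularQueueLinkedList is the assumed original name of the
# first class above (both classes were renamed to a_ here, the second one
# being the Node helper, so this is illustrative rather than runnable):
# q = CircularQueueLinkedList(initial_capacity=3 )
# q.enqueue(1 ); q.enqueue(2 )
# print(q.first() )   # 1
# print(q.dequeue() ) # 1
# print(q.dequeue() ) # 2
# q.dequeue()         # raises Exception("Empty Queue")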
| 263 |
'''simple docstring'''
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
lowercase__ =logging.get_logger(__name__)
# General docstring
lowercase__ ='PoolFormerConfig'
# Base docstring
lowercase__ ='sail/poolformer_s12'
lowercase__ =[1, 5_12, 7, 7]
# Image classification docstring
lowercase__ ='sail/poolformer_s12'
lowercase__ ='tabby, tabby cat'
lowercase__ =[
'sail/poolformer_s12',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def UpperCamelCase_ ( A__ , A__ = 0.0 , A__ = False ):
if drop_prob == 0.0 or not training:
return input
a_ = 1 - drop_prob
a_ = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
a_ = keep_prob + torch.rand(A__ , dtype=input.dtype , device=input.device )
random_tensor.floor_() # binarize
a_ = input.div(A__ ) * random_tensor
return output
class a_ ( nn.Module ):
def __init__( self , UpperCAmelCase = None ):
super().__init__()
a_ = drop_prob
def lowerCAmelCase__ ( self , UpperCAmelCase ):
return drop_path(UpperCAmelCase , self.drop_prob , self.training )
def lowerCAmelCase__ ( self ):
return "p={}".format(self.drop_prob )
class a_ ( nn.Module ):
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None ):
super().__init__()
a_ = patch_size if isinstance(UpperCAmelCase , collections.abc.Iterable ) else (patch_size, patch_size)
a_ = stride if isinstance(UpperCAmelCase , collections.abc.Iterable ) else (stride, stride)
a_ = padding if isinstance(UpperCAmelCase , collections.abc.Iterable ) else (padding, padding)
a_ = nn.Convad(UpperCAmelCase , UpperCAmelCase , kernel_size=UpperCAmelCase , stride=UpperCAmelCase , padding=UpperCAmelCase )
a_ = norm_layer(UpperCAmelCase ) if norm_layer else nn.Identity()
def lowerCAmelCase__ ( self , UpperCAmelCase ):
a_ = self.projection(UpperCAmelCase )
a_ = self.norm(UpperCAmelCase )
return embeddings
class a_ ( nn.GroupNorm ):
def __init__( self , UpperCAmelCase , **UpperCAmelCase ):
super().__init__(1 , UpperCAmelCase , **UpperCAmelCase )
class a_ ( nn.Module ):
def __init__( self , UpperCAmelCase ):
super().__init__()
a_ = nn.AvgPoolad(UpperCAmelCase , stride=1 , padding=pool_size // 2 , count_include_pad=UpperCAmelCase )
def lowerCAmelCase__ ( self , UpperCAmelCase ):
return self.pool(UpperCAmelCase ) - hidden_states
class a_ ( nn.Module ):
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
super().__init__()
a_ = nn.Convad(UpperCAmelCase , UpperCAmelCase , 1 )
a_ = nn.Convad(UpperCAmelCase , UpperCAmelCase , 1 )
a_ = PoolFormerDropPath(UpperCAmelCase )
if isinstance(config.hidden_act , UpperCAmelCase ):
a_ = ACTaFN[config.hidden_act]
else:
a_ = config.hidden_act
def lowerCAmelCase__ ( self , UpperCAmelCase ):
a_ = self.conva(UpperCAmelCase )
a_ = self.act_fn(UpperCAmelCase )
a_ = self.drop(UpperCAmelCase )
a_ = self.conva(UpperCAmelCase )
a_ = self.drop(UpperCAmelCase )
return hidden_states
class a_ ( nn.Module ):
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
super().__init__()
a_ = PoolFormerPooling(UpperCAmelCase )
a_ = PoolFormerOutput(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
a_ = PoolFormerGroupNorm(UpperCAmelCase )
a_ = PoolFormerGroupNorm(UpperCAmelCase )
# Useful for training neural nets
a_ = PoolFormerDropPath(UpperCAmelCase ) if drop_path > 0.0 else nn.Identity()
a_ = config.use_layer_scale
if config.use_layer_scale:
a_ = nn.Parameter(
config.layer_scale_init_value * torch.ones((UpperCAmelCase) ) , requires_grad=UpperCAmelCase )
a_ = nn.Parameter(
config.layer_scale_init_value * torch.ones((UpperCAmelCase) ) , requires_grad=UpperCAmelCase )
def lowerCAmelCase__ ( self , UpperCAmelCase ):
if self.use_layer_scale:
a_ = self.pooling(self.before_norm(UpperCAmelCase ) )
a_ = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
a_ = hidden_states + self.drop_path(UpperCAmelCase )
a_ = ()
a_ = self.output(self.after_norm(UpperCAmelCase ) )
a_ = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
a_ = hidden_states + self.drop_path(UpperCAmelCase )
a_ = (output,) + outputs
return outputs
else:
a_ = self.drop_path(self.pooling(self.before_norm(UpperCAmelCase ) ) )
# First residual connection
a_ = pooling_output + hidden_states
a_ = ()
# Second residual connection inside the PoolFormerOutput block
a_ = self.drop_path(self.output(self.after_norm(UpperCAmelCase ) ) )
a_ = hidden_states + layer_output
a_ = (output,) + outputs
return outputs
class a_ ( nn.Module ):
def __init__( self , UpperCAmelCase ):
super().__init__()
a_ = config
# stochastic depth decay rule
a_ = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
a_ = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
a_ = nn.ModuleList(UpperCAmelCase )
# Transformer blocks
a_ = []
a_ = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
a_ = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
UpperCAmelCase , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(UpperCAmelCase ) )
a_ = nn.ModuleList(UpperCAmelCase )
def lowerCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase=False , UpperCAmelCase=True ):
a_ = () if output_hidden_states else None
a_ = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
a_ , a_ = layers
# Get patch embeddings from hidden_states
a_ = embedding_layer(UpperCAmelCase )
# Send the embeddings through the blocks
for _, blk in enumerate(UpperCAmelCase ):
a_ = blk(UpperCAmelCase )
a_ = layer_outputs[0]
if output_hidden_states:
a_ = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=UpperCAmelCase , hidden_states=UpperCAmelCase )
class a_ ( UpperCamelCase__ ):
lowerCamelCase__ : Union[str, Any] = PoolFormerConfig
lowerCamelCase__ : Optional[Any] = 'poolformer'
lowerCamelCase__ : List[Any] = 'pixel_values'
lowerCamelCase__ : int = True
def lowerCAmelCase__ ( self , UpperCAmelCase ):
if isinstance(UpperCAmelCase , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(UpperCAmelCase , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def lowerCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase=False ):
if isinstance(UpperCAmelCase , UpperCAmelCase ):
a_ = value
lowercase__ =r'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
lowercase__ =r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n'
@add_start_docstrings(
'The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.' , UpperCamelCase__ , )
class a_ ( UpperCamelCase__ ):
def __init__( self , UpperCAmelCase ):
super().__init__(UpperCAmelCase )
a_ = config
a_ = PoolFormerEncoder(UpperCAmelCase )
# Initialize weights and apply final processing
self.post_init()
def lowerCAmelCase__ ( self ):
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(UpperCAmelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCAmelCase , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCAmelCase__ ( self , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , ):
a_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
a_ = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
a_ = self.encoder(
UpperCAmelCase , output_hidden_states=UpperCAmelCase , return_dict=UpperCAmelCase , )
a_ = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=UpperCAmelCase , hidden_states=encoder_outputs.hidden_states , )
class a_ ( nn.Module ):
def __init__( self , UpperCAmelCase ):
super().__init__()
a_ = nn.Linear(config.hidden_size , config.hidden_size )
def lowerCAmelCase__ ( self , UpperCAmelCase ):
a_ = self.dense(UpperCAmelCase )
return output
@add_start_docstrings(
'\n PoolFormer Model transformer with an image classification head on top\n ' , UpperCamelCase__ , )
class a_ ( UpperCamelCase__ ):
def __init__( self , UpperCAmelCase ):
super().__init__(UpperCAmelCase )
a_ = config.num_labels
a_ = PoolFormerModel(UpperCAmelCase )
# Final norm
a_ = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
a_ = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UpperCAmelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCAmelCase__ ( self , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , ):
a_ = return_dict if return_dict is not None else self.config.use_return_dict
a_ = self.poolformer(
UpperCAmelCase , output_hidden_states=UpperCAmelCase , return_dict=UpperCAmelCase , )
a_ = outputs[0]
a_ = self.classifier(self.norm(UpperCAmelCase ).mean([-2, -1] ) )
a_ = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
a_ = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
a_ = """single_label_classification"""
else:
a_ = """multi_label_classification"""
if self.config.problem_type == "regression":
a_ = MSELoss()
if self.num_labels == 1:
a_ = loss_fct(logits.squeeze() , labels.squeeze() )
else:
a_ = loss_fct(UpperCAmelCase , UpperCAmelCase )
elif self.config.problem_type == "single_label_classification":
a_ = CrossEntropyLoss()
a_ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
a_ = BCEWithLogitsLoss()
a_ = loss_fct(UpperCAmelCase , UpperCAmelCase )
if not return_dict:
a_ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=UpperCAmelCase , logits=UpperCAmelCase , hidden_states=outputs.hidden_states )
| 263 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''microsoft/biogpt''': '''https://huggingface.co/microsoft/biogpt/resolve/main/config.json''',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class a__ ( _snake_case ):
"""simple docstring"""
A__ : Union[str, Any] = '''biogpt'''
def __init__( self :Tuple , lowercase__ :Dict=4_2384 , lowercase__ :Optional[int]=1024 , lowercase__ :int=24 , lowercase__ :List[Any]=16 , lowercase__ :str=4096 , lowercase__ :List[str]="gelu" , lowercase__ :Tuple=0.1 , lowercase__ :Optional[int]=0.1 , lowercase__ :Dict=1024 , lowercase__ :List[Any]=0.02 , lowercase__ :int=1E-12 , lowercase__ :Any=True , lowercase__ :List[Any]=True , lowercase__ :Optional[int]=0.0 , lowercase__ :Optional[Any]=0.0 , lowercase__ :List[Any]=1 , lowercase__ :Dict=0 , lowercase__ :List[Any]=2 , **lowercase__ :Dict , ):
lowercase = vocab_size
lowercase = max_position_embeddings
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = initializer_range
lowercase = layer_norm_eps
lowercase = scale_embedding
lowercase = use_cache
lowercase = layerdrop
lowercase = activation_dropout
super().__init__(pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ , **lowercase__ )
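# Hypothetical usage (assumes the upstream export name BioGptConfig; defaults
# taken from the signature above):
# from transformers import BioGptConfig
# cfg = BioGptConfig(num_hidden_layers=6 , hidden_size=512 )
# print(cfg.model_type ) # "biogpt"
# print(cfg.vocab_size ) # 42384, the default above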
| 314 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__magic_name__ = {'''configuration_timm_backbone''': ['''TimmBackboneConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''TimmBackbone''']
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 314 | 1 |
import sys
UpperCamelCase = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def str_eval(s: str) -> int:
    """Return the product of the digits in the string s."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits with a sliding window."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
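

# Illustrative cross-check (an addition, not part of the original solution): a
# naive variant that scores every 13-digit window directly. Handy for validating
# the sliding-window shortcut above on the same input.
def solution_brute_force(n: str = N) -> int:
    return max(str_eval(n[i : i + 13]) for i in range(len(n) - 12))
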
if __name__ == "__main__":
print(f'''{solution() = }''')
| 66 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 350 | 0 |
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '\n    Examples:\n    ```py\n    >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n    >>> import torch\n\n    >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n    >>> pipe_prior.to("cuda")\n    >>> prompt = "red cat, 4k photo"\n    >>> out = pipe_prior(prompt)\n    >>> image_emb = out.image_embeds\n    >>> zero_image_emb = out.negative_image_embeds\n    >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n    >>> pipe.to("cuda")\n    >>> image = pipe(\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=zero_image_emb,\n    ...     height=768,\n    ...     width=768,\n    ...     num_inference_steps=50,\n    ... ).images\n    >>> image[0].save("cat.png")\n    ```\n'
def downscale_height_and_width(height: int, width: int, scale_factor: int = 8):
    """Map a requested pixel size to the nearest latent size divisible by the scale factor."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
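
# For example (illustrative): with the default factor 8, a 512x512 pixel request
# maps to a 64x64 latent grid, i.e. downscale_height_and_width(512, 512) == (64, 64).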
class KandinskyV22Pipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 720 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ReformerAttention',
'ReformerForMaskedLM',
'ReformerForQuestionAnswering',
'ReformerForSequenceClassification',
'ReformerLayer',
'ReformerModel',
'ReformerModelWithLMHead',
'ReformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 75 | 0 |
"""simple docstring"""
def UpperCAmelCase_ ( __a : str ):
'''simple docstring'''
_lowerCamelCase : List[Any] = ''
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
def UpperCAmelCase_ ( __a : str ):
'''simple docstring'''
_lowerCamelCase : Tuple = [chr(i + 65 ) for i in range(26 )]
# Remove duplicate characters from key
_lowerCamelCase : Union[str, Any] = remove_duplicates(key.upper() )
_lowerCamelCase : Optional[int] = len(__a )
# First fill cipher with key characters
_lowerCamelCase : Dict = {alphabet[i]: char for i, char in enumerate(__a )}
# Then map remaining characters in alphabet to
# the alphabet from the beginning
for i in range(len(__a ) , 26 ):
_lowerCamelCase : Optional[int] = alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
_lowerCamelCase : Optional[int] = alphabet[i - offset]
_lowerCamelCase : Optional[Any] = char
return cipher_alphabet
def UpperCAmelCase_ ( __a : str , __a : dict[str, str] ):
'''simple docstring'''
return "".join(cipher_map.get(__a , __a ) for ch in message.upper() )
def UpperCAmelCase_ ( __a : str , __a : dict[str, str] ):
'''simple docstring'''
_lowerCamelCase : Any = {v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(__a , __a ) for ch in message.upper() )
def UpperCAmelCase_ ( ):
'''simple docstring'''
_lowerCamelCase : Dict = input('Enter message to encode or decode: ' ).strip()
_lowerCamelCase : Dict = input('Enter keyword: ' ).strip()
_lowerCamelCase : Dict = input('Encipher or decipher? E/D:' ).strip()[0].lower()
try:
_lowerCamelCase : Tuple = {'e': encipher, 'd': decipher}[option]
except KeyError:
raise KeyError('invalid input option' )
_lowerCamelCase : str = create_cipher_map(__a )
print(func(__a , __a ) )
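

# Round-trip sanity check (an illustrative addition; the keyword "college" is an
# arbitrary example, not part of the original script):
def _demo() -> None:
    cipher_map = create_cipher_map("college")
    encoded = encipher("testing simple keyword cipher", cipher_map)
    assert decipher(encoded, cipher_map) == "TESTING SIMPLE KEYWORD CIPHER"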
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 437 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))
        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
| 437 | 1 |
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    """Incrementally yield primes, keeping a map of composite -> prime factor."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2
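

# Quick sanity check (an illustrative addition, not part of the original file):
# the first primes from the incremental sieve should match the usual sequence.
def _demo() -> None:
    gen = sieve()
    assert [next(gen) for _ in range(8)] == [2, 3, 5, 7, 11, 13, 17, 19]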
if __name__ == "__main__":
print(solution())
| 719 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/vocab.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/vocab.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/vocab.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'
),
},
'merges_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/merges.txt',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/merges.txt',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/merges.txt',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'
),
},
'tokenizer_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/tokenizer.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/tokenizer.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json',
'roberta-base-openai-detector': (
'https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'
),
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'roberta-base': 5_12,
'roberta-large': 5_12,
'roberta-large-mnli': 5_12,
'distilroberta-base': 5_12,
'roberta-base-openai-detector': 5_12,
'roberta-large-openai-detector': 5_12,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token,
            eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token,
            pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets, **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
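

# Example usage (illustrative):
#   tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
#   input_ids = tokenizer("Hello world").input_ids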
| 130 | 0 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, images, text=None, text_pair=None, boxes=None, word_labels=None, add_special_tokens=True,
                 padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None,
                 return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False,
                 return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False,
                 verbose=True, return_tensors=None, **kwargs) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"], text_pair=text_pair,
            boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels,
            add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length,
            stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
            return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 125 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = '''\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
'''
_DESCRIPTION = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''
_KWARGS_DESCRIPTION = R'''
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
>>> metric = datasets.load_metric("competition_math")
>>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
>>> print(results)
{\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CompetitionMathMetric(datasets.Metric):
    """Accuracy metric for the MATH dataset, using hendrycks/math equivalence checking."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, references, predictions):
        n_correct = 0.0
        for i, j in zip(references, predictions):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
| 125 | 1 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
| 111 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
_a : Optional[Any] = [
"""python""",
"""tqdm""",
"""regex""",
"""requests""",
"""packaging""",
"""filelock""",
"""numpy""",
"""tokenizers""",
"""huggingface-hub""",
"""safetensors""",
"""accelerate""",
"""pyyaml""",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
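

# Example (illustrative): run the same pinned-version check manually for a package.
# dep_version_check("tqdm")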
| 111 | 1 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """
    Holds the mean and standard deviation of the image embedder, used to
    normalize and un-normalize image embeddings.
    """

    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(self, torch_device: Optional[Union[str, torch.device]] = None, torch_dtype: Optional[torch.dtype] = None):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
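

# Minimal usage sketch (illustrative addition): with the freshly initialised
# zero mean and unit std, scale followed by unscale is the identity.
def _demo() -> None:
    normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
    embeds = torch.randn(2, 768)
    assert torch.allclose(normalizer.unscale(normalizer.scale(embeds)), embeds, atol=1e-6)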
| 86 |
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
| 60 | 0 |
"""simple docstring"""
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilenet_v1_config(model_name):
    config = MobileNetV1Config(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def prepare_img():
    # We will verify our results on a standard COCO test image
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilenet_v1_config(model_name)

    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='mobilenet_v1_1.0_224',
type=str,
help='Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.',
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
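
# Example invocation (illustrative; the script name and local paths are placeholders):
#   python convert_original_tf_checkpoint_to_pytorch.py \
#       --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#       --pytorch_dump_folder_path ./mobilenet_v1_1.0_224_hf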
| 721 |
"""simple docstring"""
def longest_common_substring(text1: str, text2: str) -> str:
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")

    text1_length = len(text1)
    text2_length = len(text2)

    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0

    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]

    return text1[ans_index - ans_length : ans_index]
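

# Quick sanity check (an illustrative addition, not part of the original module):
def _demo() -> None:
    assert longest_common_substring("abcdjkl", "zzabcdyy") == "abcd"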
if __name__ == "__main__":
import doctest
doctest.testmod()
| 299 | 0 |
def xnor_gate(input_1: int, input_2: int) -> int:
    """XNOR: output is 1 exactly when both inputs agree."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
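

# Equivalent bitwise formulation (an illustrative addition; assumes 0/1 inputs):
def xnor_gate_bitwise(input_1: int, input_2: int) -> int:
    return (input_1 ^ input_2) ^ 1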
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 455 |
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal part of number, optionally rounded to digit_amount digits."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.5_3, 0))
print(decimal_isolate(3_5.3_4_5, 1))
print(decimal_isolate(3_5.3_4_5, 2))
print(decimal_isolate(3_5.3_4_5, 3))
print(decimal_isolate(-1_4.7_8_9, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-1_4.1_2_3, 1))
print(decimal_isolate(-1_4.1_2_3, 2))
print(decimal_isolate(-1_4.1_2_3, 3))
| 455 | 1 |
import tempfile
import unittest
from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model
class UMT5ModelTester:
    def __init__(self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=9, is_training=True, use_attention_mask=True, use_labels=False, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0, decoder_start_token_id=0, scope=None, decoder_layers=None):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config(self):
        return T5Config.from_pretrained("google/umt5-base")

    def prepare_inputs_dict(self, config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def get_pipeline_config(self):
        return T5Config(vocab_size=166, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id)

    def get_config(self):
        return T5Config(vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id)
    def create_and_check_model(self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels):
        model = UMT5Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state

        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)
    def create_and_check_decoder_model_past(self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels):
        model = UMT5Model(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_model_fp16_forward(self, config, input_dict):
        model = UMT5Model(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMT5ForConditionalGeneration,
            "feature-extraction": UMT5Model,
            "summarization": UMT5ForConditionalGeneration,
            "text2text-generation": UMT5ForConditionalGeneration,
            "translation": UMT5ForConditionalGeneration,
            "question-answering": UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMT5ModelTester(self)
    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMT5Model(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f"{tmpdirname}/t5_test.onnx",
                export_params=True,
                opset_version=9,
                input_names=["input_ids", "decoder_input_ids"],
            )

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)
def __lowerCamelCase ( self : Any):
'''simple docstring'''
__lowercase =['encoder_attentions', 'decoder_attentions', 'cross_attentions']
__lowercase =self.model_tester.prepare_config_and_inputs()
__lowercase =config_and_inputs[0]
__lowercase =UMTaForConditionalGeneration(_lowerCAmelCase).eval()
model.to(_lowerCAmelCase)
__lowercase ={
'head_mask': torch.zeros(config.num_layers , config.num_heads , device=_lowerCAmelCase),
'decoder_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=_lowerCAmelCase),
'cross_attn_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=_lowerCAmelCase),
}
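        # zero out one head-mask type per iteration, generate, and verify that the
        # corresponding attention weights come out all-zero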
for attn_name, (name, mask) in zip(_lowerCAmelCase , head_masking.items()):
__lowercase ={name: mask}
            # Explicitly pass decoder_head_mask, as it is required by the T5 model when head_mask is specified
if name == "head_mask":
__lowercase =torch.ones(
config.num_decoder_layers , config.num_heads , device=_lowerCAmelCase)
__lowercase =model.generate(
config_and_inputs[1]['input_ids'] , num_beams=1 , max_length=3 , output_attentions=_lowerCAmelCase , return_dict_in_generate=_lowerCAmelCase , **_lowerCAmelCase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
__lowercase =out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights]) , 0.0)
@unittest.skip('Does not work on the tiny model as we keep hitting edge cases.')
def __lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
@unittest.skip(
'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged')
def __lowerCamelCase ( self : str):
'''simple docstring'''
__lowercase =UMTaForConditionalGeneration.from_pretrained('google/umt5-small' , return_dict=_lowerCAmelCase).to(_lowerCAmelCase)
__lowercase =AutoTokenizer.from_pretrained('google/umt5-small' , use_fast=_lowerCAmelCase , legacy=_lowerCAmelCase)
__lowercase =[
'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
'No se como puedo <extra_id_0>.',
'This is the reason why we <extra_id_0> them.',
'The <extra_id_0> walks in <extra_id_1>, seats',
'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
]
__lowercase =tokenizer(_lowerCAmelCase , return_tensors='pt' , padding=_lowerCAmelCase).input_ids
# fmt: off
__lowercase =torch.tensor(
[
                [ 3_8_5_3_0, 2_1_0_7_0_3, 2_5_6_2_9_9, 1_4_1_0, 2_5_6_2_9_8, 2_7_4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 8_2_6, 3_2_1, 6_7_1, 2_5_9_2_2, 2_5_6_2_9_9, 2_7_4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 1_4_6_0, 3_3_9, 3_1_2, 1_9_0_1_4, 1_0_6_2_0, 7_5_8, 2_5_6_2_9_9, 2_3_5_5, 2_7_4, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 5_1_7, 2_5_6_2_9_9, 1_4_8_6_9, 2_8_1, 3_0_1, 2_5_6_2_9_8, 2_7_5, 1_1_9_9_8_3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 3_2_0, 2_5_6_2_9_9, 1_4_8_6_9, 2_8_1, 2_2_3_4, 2_8_9, 2_2_7_5, 3_3_3, 6_1_3_9_1, 2_8_9, 2_5_6_2_9_8, 5_4_3, 2_5_6_2_9_7, 1_6_8_7_1_4, 3_2_9, 2_5_6_2_9_6, 2_7_4, 1],
])
# fmt: on
torch.testing.assert_allclose(_lowerCAmelCase , _lowerCAmelCase)
__lowercase =model.generate(input_ids.to(_lowerCAmelCase))
__lowercase =[
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
__lowercase =tokenizer.batch_decode(_lowerCAmelCase)
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase)
| 454 |
'''simple docstring'''
import numpy as np
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Dict):
'''simple docstring'''
__lowercase =(0, 0)
__lowercase =None
__lowercase =0
__lowercase =0
__lowercase =0
def __eq__( self : List[str] , _lowerCAmelCase : Optional[Any]):
'''simple docstring'''
return self.position == cell.position
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
print(self.position)
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Any , _lowerCAmelCase : List[str]=(5, 5)):
'''simple docstring'''
__lowercase =np.zeros(_lowerCAmelCase)
__lowercase =world_size[0]
__lowercase =world_size[1]
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
print(self.w)
def __lowerCamelCase ( self : int , _lowerCAmelCase : List[str]):
'''simple docstring'''
__lowercase =[
(-1, -1),
(-1, 0),
(-1, 1),
(0, -1),
(0, 1),
(1, -1),
(1, 0),
(1, 1),
]
__lowercase =cell.position[0]
__lowercase =cell.position[1]
__lowercase =[]
        for n in neighbour_coords:
__lowercase =current_x + n[0]
__lowercase =current_y + n[1]
if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
__lowercase =Cell()
__lowercase =(x, y)
__lowercase =cell
neighbours.append(_lowerCAmelCase)
return neighbours
def _A ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
__lowercase =[]
__lowercase =[]
_open.append(_lowerCAmelCase )
while _open:
__lowercase =np.argmin([n.f for n in _open] )
__lowercase =_open[min_f]
_closed.append(_open.pop(_lowerCAmelCase ) )
if current == goal:
break
        for n in world.get_neighbours(_lowerCAmelCase ):
for c in _closed:
if c == n:
continue
__lowercase =current.g + 1
__lowercase , __lowercase =n.position
__lowercase , __lowercase =goal.position
__lowercase =(ya - ya) ** 2 + (xa - xa) ** 2
__lowercase =n.h + n.g
for c in _open:
if c == n and c.f < n.f:
continue
_open.append(_lowerCAmelCase )
__lowercase =[]
while current.parent is not None:
path.append(current.position )
__lowercase =current.parent
path.append(current.position )
return path[::-1]
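# Note: the heuristic above is the squared Euclidean distance to the goal.
# With unit step costs and diagonal moves it can overestimate the remaining
# cost, so the returned path is not guaranteed to be the shortest one.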
if __name__ == "__main__":
lowerCamelCase = Gridworld()
# Start position and goal
lowerCamelCase = Cell()
lowerCamelCase = (0, 0)
lowerCamelCase = Cell()
lowerCamelCase = (4, 4)
print(f"path from {start.position} to {goal.position}")
lowerCamelCase = astar(world, start, goal)
    # Mark the path cells so they show up when printing the world.
for i in s:
lowerCamelCase = 1
print(world.w)
| 454 | 1 |
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def UpperCAmelCase_ ( ):
lowercase_ = argparse.ArgumentParser()
parser.add_argument(
"""-m""" , """--pretrained_model_name_or_path""" , type=_snake_case , default=_snake_case , required=_snake_case , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , )
parser.add_argument(
"""-c""" , """--caption""" , type=_snake_case , default="""robotic cat with wings""" , help="""Text used to generate images.""" , )
parser.add_argument(
"""-n""" , """--images_num""" , type=_snake_case , default=4 , help="""How much images to generate.""" , )
parser.add_argument(
"""-s""" , """--seed""" , type=_snake_case , default=4_2 , help="""Seed for random process.""" , )
parser.add_argument(
"""-ci""" , """--cuda_id""" , type=_snake_case , default=0 , help="""cuda_id.""" , )
lowercase_ = parser.parse_args()
return args
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
    if len(_snake_case ) != rows * cols:
        raise ValueError("""The specified number of rows and columns does not match the number of images.""" )
lowercase_ , lowercase_ = imgs[0].size
lowercase_ = Image.new("""RGB""" , size=(cols * w, rows * h) )
lowercase_ , lowercase_ = grid.size
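    # paste the images row-major: column index is i % cols, row index is i // cols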
for i, img in enumerate(_snake_case ):
grid.paste(_snake_case , box=(i % cols * w, i // cols * h) )
return grid
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__="robotic cat with wings" , UpperCAmelCase__=7.5 , UpperCAmelCase__=5_0 , UpperCAmelCase__=1 , UpperCAmelCase__=4_2 , ):
lowercase_ = torch.Generator(pipeline.device ).manual_seed(_snake_case )
lowercase_ = pipeline(
_snake_case , guidance_scale=_snake_case , num_inference_steps=_snake_case , generator=_snake_case , num_images_per_prompt=_snake_case , ).images
lowercase_ = int(math.sqrt(_snake_case ) )
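    # lay the images out in a near-square grid; this assumes num_images_per_prompt
    # is divisible by _rows (image_grid raises a ValueError otherwise)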
lowercase_ = image_grid(_snake_case , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
a = parse_args()
# Load models and create wrapper for stable diffusion
a = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer')
a = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder')
a = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae')
a = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet')
a = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
a = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')):
a = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, 'unet', unet)
else:
a = unet.to(torch.device('cuda', args.cuda_id))
a = pipeline.to(unet.device)
a = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split()))))
a = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
| 412 |
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
_lowerCAmelCase : Optional[int] = "__DUMMY_TRANSFORMERS_USER__"
_lowerCAmelCase : Dict = "Dummy User"
_lowerCAmelCase : Union[str, Any] = "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"
_lowerCAmelCase : List[Any] = "https://hub-ci.huggingface.co"
_lowerCAmelCase : Union[str, Any] = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
_lowerCAmelCase : int = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
_lowerCAmelCase : Dict = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def UpperCamelCase_( _snake_case : str ):
"""simple docstring"""
monkeypatch.setattr(
'huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE' , _snake_case )
@pytest.fixture
def UpperCamelCase_( _snake_case : Any ):
"""simple docstring"""
monkeypatch.setattr('datasets.config.HF_ENDPOINT' , _snake_case )
monkeypatch.setattr('datasets.config.HUB_DATASETS_URL' , _snake_case )
@pytest.fixture
def UpperCamelCase_( _snake_case : int ):
"""simple docstring"""
monkeypatch.setattr('huggingface_hub.hf_api.HfFolder.path_token' , _snake_case )
@pytest.fixture
def UpperCamelCase_( _snake_case : List[str] , _snake_case : List[Any] ):
"""simple docstring"""
HfFolder.save_token(_snake_case )
yield
HfFolder.delete_token()
@pytest.fixture(scope='session' )
def UpperCamelCase_( ):
"""simple docstring"""
return HfApi(endpoint=_snake_case )
@pytest.fixture(scope='session' )
def UpperCamelCase_( _snake_case : HfApi ):
"""simple docstring"""
__a =HfFolder.get_token()
HfFolder.save_token(_snake_case )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(_snake_case )
@pytest.fixture
def UpperCamelCase_( _snake_case : Optional[int] ):
"""simple docstring"""
def _cleanup_repo(_snake_case : Optional[int] ):
hf_api.delete_repo(_snake_case , token=_snake_case , repo_type='dataset' )
return _cleanup_repo
@pytest.fixture
def UpperCamelCase_( _snake_case : int ):
"""simple docstring"""
@contextmanager
def _temporary_repo(_snake_case : Optional[int] ):
try:
yield repo_id
finally:
cleanup_repo(_snake_case )
return _temporary_repo
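# usage in a test: `with temporary_repo(repo_id): ...` -- the try/finally above
# guarantees the repo is cleaned up even if the test body raises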
@pytest.fixture(scope='session' )
def UpperCamelCase_( _snake_case : HfApi , _snake_case : Optional[int] , _snake_case : List[Any] ):
"""simple docstring"""
__a =F'repo_txt_data-{int(time.time() * 1_0e3 )}'
__a =F'{CI_HUB_USER}/{repo_name}'
hf_api.create_repo(_snake_case , token=_snake_case , repo_type='dataset' , private=_snake_case )
hf_api.upload_file(
token=_snake_case , path_or_fileobj=str(_snake_case ) , path_in_repo='data/text_data.txt' , repo_id=_snake_case , repo_type='dataset' , )
yield repo_id
try:
hf_api.delete_repo(_snake_case , token=_snake_case , repo_type='dataset' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def UpperCamelCase_( _snake_case : List[str] , _snake_case : List[Any] , _snake_case : Optional[int] ):
"""simple docstring"""
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope='session' )
def UpperCamelCase_( _snake_case : HfApi , _snake_case : List[str] , _snake_case : Union[str, Any] ):
"""simple docstring"""
__a =F'repo_zipped_txt_data-{int(time.time() * 1_0e3 )}'
__a =F'{CI_HUB_USER}/{repo_name}'
hf_api.create_repo(_snake_case , token=_snake_case , repo_type='dataset' , private=_snake_case )
hf_api.upload_file(
token=_snake_case , path_or_fileobj=str(_snake_case ) , path_in_repo='data.zip' , repo_id=_snake_case , repo_type='dataset' , )
yield repo_id
try:
hf_api.delete_repo(_snake_case , token=_snake_case , repo_type='dataset' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def UpperCamelCase_( _snake_case : str , _snake_case : int , _snake_case : List[str] ):
"""simple docstring"""
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope='session' )
def UpperCamelCase_( _snake_case : HfApi , _snake_case : Dict , _snake_case : Optional[Any] ):
"""simple docstring"""
__a =F'repo_zipped_img_data-{int(time.time() * 1_0e3 )}'
__a =F'{CI_HUB_USER}/{repo_name}'
hf_api.create_repo(_snake_case , token=_snake_case , repo_type='dataset' , private=_snake_case )
hf_api.upload_file(
token=_snake_case , path_or_fileobj=str(_snake_case ) , path_in_repo='data.zip' , repo_id=_snake_case , repo_type='dataset' , )
yield repo_id
try:
hf_api.delete_repo(_snake_case , token=_snake_case , repo_type='dataset' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def UpperCamelCase_( _snake_case : Optional[Any] , _snake_case : Optional[Any] , _snake_case : Union[str, Any] ):
"""simple docstring"""
return hf_private_dataset_repo_zipped_img_data_
| 242 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def lowercase (snake_case__ : Tuple , snake_case__ : str , snake_case__ : List[Any] ) -> Any:
'''simple docstring'''
if gpta_config_file == "":
lowerCAmelCase = GPTaConfig()
else:
lowerCAmelCase = GPTaConfig.from_json_file(snake_case__ )
lowerCAmelCase = GPTaModel(snake_case__ )
# Load weights from numpy
load_tf_weights_in_gpta(snake_case__ , snake_case__ , snake_case__ )
# Save pytorch-model
lowerCAmelCase = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
lowerCAmelCase = pytorch_dump_folder_path + """/""" + CONFIG_NAME
print(f'''Save PyTorch model to {pytorch_weights_dump_path}''' )
torch.save(model.state_dict() , snake_case__ )
print(f'''Save configuration file to {pytorch_config_dump_path}''' )
with open(snake_case__ , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--gpt2_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
a = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
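# Example invocation (script name and paths are placeholders):
#   python convert_gpt2_checkpoint.py --gpt2_checkpoint_path ./model.ckpt \
#       --pytorch_dump_folder_path ./pytorch_model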
| 701 |
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def lowercase (snake_case__ : str , snake_case__ : float | Decimal , snake_case__ : float = 10**-10 ) -> float:
'''simple docstring'''
lowerCAmelCase = a
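    # Newton-Raphson update: x_{n+1} = x_n - f(x_n) / f'(x_n). Decimal keeps the
    # arithmetic precise and sympy's diff supplies f' symbolically.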
while True:
lowerCAmelCase = Decimal(snake_case__ ) - (
Decimal(eval(snake_case__ ) ) / Decimal(eval(str(diff(snake_case__ ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(snake_case__ ) ) < precision: # noqa: S307
return float(snake_case__ )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
print(f"""The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}""")
# Find Square Root of 5
print(f"""The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}""")
# Exponential Roots
print(f"""The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}""")
| 529 | 0 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
SCREAMING_SNAKE_CASE__ : Any = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def a__ ( snake_case__ : Optional[int] ):
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(snake_case__ )
def a__ ( snake_case__ : int ):
from diffusers.utils.testing_utils import pytest_terminal_summary_main
_UpperCAmelCase : int = terminalreporter.config.getoption("""--make-reports""" )
if make_reports:
pytest_terminal_summary_main(snake_case__ , id=snake_case__ )
| 643 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE__ : Optional[int] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Tuple = {
'microsoft/conditional-detr-resnet-50': (
'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'
),
}
class _SCREAMING_SNAKE_CASE ( A ):
__SCREAMING_SNAKE_CASE = '''conditional_detr'''
__SCREAMING_SNAKE_CASE = ['''past_key_values''']
__SCREAMING_SNAKE_CASE = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , A_=True , A_=None , A_=3 , A_=3_00 , A_=6 , A_=20_48 , A_=8 , A_=6 , A_=20_48 , A_=8 , A_=0.0 , A_=0.0 , A_=True , A_="relu" , A_=2_56 , A_=0.1 , A_=0.0 , A_=0.0 , A_=0.0_2 , A_=1.0 , A_=False , A_="sine" , A_="resnet50" , A_=True , A_=False , A_=2 , A_=5 , A_=2 , A_=1 , A_=1 , A_=2 , A_=5 , A_=2 , A_=0.2_5 , **A_ , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
_UpperCAmelCase : int = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(A_ , A_ ):
_UpperCAmelCase : Tuple = backbone_config.get("""model_type""" )
_UpperCAmelCase : Union[str, Any] = CONFIG_MAPPING[backbone_model_type]
_UpperCAmelCase : Optional[Any] = config_class.from_dict(A_ )
_UpperCAmelCase : Tuple = use_timm_backbone
_UpperCAmelCase : List[str] = backbone_config
_UpperCAmelCase : Any = num_channels
_UpperCAmelCase : Tuple = num_queries
_UpperCAmelCase : Dict = d_model
_UpperCAmelCase : Dict = encoder_ffn_dim
_UpperCAmelCase : Tuple = encoder_layers
_UpperCAmelCase : List[Any] = encoder_attention_heads
_UpperCAmelCase : Union[str, Any] = decoder_ffn_dim
_UpperCAmelCase : Optional[int] = decoder_layers
_UpperCAmelCase : List[str] = decoder_attention_heads
_UpperCAmelCase : Optional[Any] = dropout
_UpperCAmelCase : str = attention_dropout
_UpperCAmelCase : Union[str, Any] = activation_dropout
_UpperCAmelCase : Union[str, Any] = activation_function
_UpperCAmelCase : int = init_std
_UpperCAmelCase : Union[str, Any] = init_xavier_std
_UpperCAmelCase : Tuple = encoder_layerdrop
_UpperCAmelCase : Tuple = decoder_layerdrop
_UpperCAmelCase : Tuple = encoder_layers
_UpperCAmelCase : Any = auxiliary_loss
_UpperCAmelCase : List[str] = position_embedding_type
_UpperCAmelCase : int = backbone
_UpperCAmelCase : Any = use_pretrained_backbone
_UpperCAmelCase : int = dilation
# Hungarian matcher
_UpperCAmelCase : Dict = class_cost
_UpperCAmelCase : Optional[Any] = bbox_cost
_UpperCAmelCase : Tuple = giou_cost
# Loss coefficients
_UpperCAmelCase : Union[str, Any] = mask_loss_coefficient
_UpperCAmelCase : int = dice_loss_coefficient
_UpperCAmelCase : List[Any] = cls_loss_coefficient
_UpperCAmelCase : List[Any] = bbox_loss_coefficient
_UpperCAmelCase : int = giou_loss_coefficient
_UpperCAmelCase : Optional[int] = focal_alpha
super().__init__(is_encoder_decoder=A_ , **A_ )
@property
def __snake_case( self ):
return self.encoder_attention_heads
@property
def __snake_case( self ):
return self.d_model
def __snake_case( self ):
_UpperCAmelCase : List[str] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
_UpperCAmelCase : Optional[int] = self.backbone_config.to_dict()
_UpperCAmelCase : Dict = self.__class__.model_type
return output
class _SCREAMING_SNAKE_CASE ( A ):
__SCREAMING_SNAKE_CASE = version.parse('''1.11''' )
@property
def __snake_case( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def __snake_case( self ):
return 1e-5
@property
def __snake_case( self ):
return 12
| 643 | 1 |
"""simple docstring"""
def lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase__ : int = 0
for i in range(1 , 1001 ):
total += i**i
return str(__UpperCamelCase )[-10:]
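# A constant-memory sketch (hypothetical helper, not part of the original
# solution): keep only the last ten digits at every step via three-argument
# pow, which computes i**i modulo 10**10 without building the huge integers.
def _solution_mod() -> str:
    modulus = 10**10
    return str(sum(pow(i, i, modulus) for i in range(1, 1001)) % modulus).zfill(10)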
if __name__ == "__main__":
print(solution())
| 194 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
class __lowercase ( __lowerCamelCase ):
snake_case_ = """timm_backbone"""
def __init__( self : List[str] ,A : Any=None ,A : List[Any]=3 ,A : Any=True ,A : Union[str, Any]=True ,A : List[Any]=None ,**A : Optional[int] ,):
'''simple docstring'''
super().__init__(**A )
UpperCAmelCase__ : Optional[int] = backbone
UpperCAmelCase__ : Dict = num_channels
UpperCAmelCase__ : Optional[int] = features_only
UpperCAmelCase__ : Tuple = use_pretrained_backbone
UpperCAmelCase__ : str = True
UpperCAmelCase__ : List[str] = out_indices if out_indices is not None else (-1,)
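        # the default (-1,) keeps only the backbone's last feature map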
| 194 | 1 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
SCREAMING_SNAKE_CASE : Tuple = importlib.util.find_spec("s3fs") is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
SCREAMING_SNAKE_CASE : List[compression.BaseCompressedFileFileSystem] = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def UpperCamelCase_( lowerCamelCase_ ) -> List[str]:
if "://" in dataset_path:
_lowercase : Dict = dataset_path.split('://' )[1]
return dataset_path
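# strips the protocol prefix, e.g. "s3://bucket/dir" -> "bucket/dir"; plain
# local paths pass through unchanged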
def UpperCamelCase_( lowerCamelCase_ ) -> int:
if fs is not None and fs.protocol != "file":
return True
else:
return False
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> str:
_lowercase : str = not is_remote_filesystem(A__ )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(A__ ) , fs._strip_protocol(A__ ) )
else:
fs.mv(A__ , A__ , recursive=A__ )
def UpperCamelCase_( ) -> Any:
if hasattr(fsspec.asyn , 'reset_lock' ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
_lowercase : Optional[Any] = None
_lowercase : Dict = None
_lowercase : Optional[Any] = threading.Lock()
| 89 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
__A : Dict = logging.get_logger(__name__)
@dataclass
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = [
"no_inference",
"no_cuda",
"no_tpu",
"no_speed",
"no_memory",
"no_env_print",
"no_multi_process",
]
def __init__( self : List[Any] , **__lowerCamelCase : Any ):
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
SCREAMING_SNAKE_CASE = deprecated_arg[3:]
SCREAMING_SNAKE_CASE = not kwargs.pop(__lowerCamelCase )
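                # e.g. a legacy `no_cuda=True` becomes `cuda=False` on the new argument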
logger.warning(
f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
f" {positive_arg}={kwargs[positive_arg]}" )
SCREAMING_SNAKE_CASE = kwargs.pop("tpu_name" , self.tpu_name )
SCREAMING_SNAKE_CASE = kwargs.pop("device_idx" , self.device_idx )
SCREAMING_SNAKE_CASE = kwargs.pop("eager_mode" , self.eager_mode )
SCREAMING_SNAKE_CASE = kwargs.pop("use_xla" , self.use_xla )
super().__init__(**__lowerCamelCase )
lowerCamelCase__ = field(
default=__snake_case , metadata={"help": "Name of TPU"} , )
lowerCamelCase__ = field(
default=0 , metadata={"help": "CPU / GPU device index. Defaults to 0."} , )
lowerCamelCase__ = field(default=__snake_case , metadata={"help": "Benchmark models in eager model."} )
lowerCamelCase__ = field(
default=__snake_case , metadata={
"help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
} , )
@cached_property
def _snake_case ( self : Optional[int] ):
requires_backends(self , ["tf"] )
SCREAMING_SNAKE_CASE = None
if self.tpu:
try:
if self.tpu_name:
SCREAMING_SNAKE_CASE = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
SCREAMING_SNAKE_CASE = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
SCREAMING_SNAKE_CASE = None
return tpu
@cached_property
def _snake_case ( self : Any ):
requires_backends(self , ["tf"] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
SCREAMING_SNAKE_CASE = tf.distribute.TPUStrategy(self._setup_tpu )
else:
            # currently no multi-GPU setup is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , "GPU" )
SCREAMING_SNAKE_CASE = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}" )
else:
tf.config.set_visible_devices([] , "GPU" ) # disable GPU
SCREAMING_SNAKE_CASE = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}" )
return strategy
@property
def _snake_case ( self : Any ):
requires_backends(self , ["tf"] )
return self._setup_tpu is not None
@property
def _snake_case ( self : Optional[Any] ):
requires_backends(self , ["tf"] )
return self._setup_strategy
@property
def _snake_case ( self : List[str] ):
requires_backends(self , ["tf"] )
return tf.config.list_physical_devices("GPU" )
@property
def _snake_case ( self : Any ):
requires_backends(self , ["tf"] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def _snake_case ( self : Dict ):
return self.n_gpu > 0
| 16 | 0 |
"""simple docstring"""
from collections import namedtuple
import requests
from lxml import html # type: ignore
lowerCamelCase_ = namedtuple('''covid_data''', '''cases deaths recovered''')
def snake_case ( A__ = "https://www.worldometers.info/coronavirus/" ):
UpperCAmelCase_ : int = "//div[@class = \"maincounter-number\"]/span/text()"
return covid_data(*html.fromstring(requests.get(A__ ).content ).xpath(A__ ) )
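# the XPath selects the three "maincounter-number" counters (cases, deaths,
# recovered); the star-unpack maps them onto the namedtuple fields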
lowerCamelCase_ = '''Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}'''
print(fmt.format(*covid_stats()))
| 463 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase_ = {
'''configuration_funnel''': ['''FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FunnelConfig'''],
'''convert_funnel_original_tf_checkpoint_to_pytorch''': [],
'''tokenization_funnel''': ['''FunnelTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = ['''FunnelTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
'''FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FunnelBaseModel''',
'''FunnelForMaskedLM''',
'''FunnelForMultipleChoice''',
'''FunnelForPreTraining''',
'''FunnelForQuestionAnswering''',
'''FunnelForSequenceClassification''',
'''FunnelForTokenClassification''',
'''FunnelModel''',
'''FunnelPreTrainedModel''',
'''load_tf_weights_in_funnel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
'''TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFFunnelBaseModel''',
'''TFFunnelForMaskedLM''',
'''TFFunnelForMultipleChoice''',
'''TFFunnelForPreTraining''',
'''TFFunnelForQuestionAnswering''',
'''TFFunnelForSequenceClassification''',
'''TFFunnelForTokenClassification''',
'''TFFunnelModel''',
'''TFFunnelPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 463 | 1 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
__magic_name__: str = logging.get_logger(__name__) # pylint: disable=invalid-name
class snake_case__ ( _lowerCAmelCase ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ) -> int:
super().__init__()
self.register_modules(
vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , )
def __magic_name__ ( self , lowerCAmelCase__ = "auto" ) -> Optional[Any]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__magic_name__ : Optional[int] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCAmelCase__ )
def __magic_name__ ( self ) -> Tuple:
self.enable_attention_slicing(lowerCAmelCase__ )
@torch.no_grad()
def __call__( self , lowerCAmelCase__ , lowerCAmelCase__ = 5_12 , lowerCAmelCase__ = 5_12 , lowerCAmelCase__ = 50 , lowerCAmelCase__ = 7.5 , lowerCAmelCase__ = None , lowerCAmelCase__ = 1 , lowerCAmelCase__ = 0.0 , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = "pil" , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = 1 , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> Dict:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__magic_name__ : Optional[Any] = 1
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__magic_name__ : List[Any] = len(lowerCAmelCase__ )
else:
raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(lowerCAmelCase__ )}' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or callback_steps <= 0)
):
raise ValueError(
F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
F' {type(lowerCAmelCase__ )}.' )
# get prompt text embeddings
__magic_name__ : str = self.tokenizer(
lowerCAmelCase__ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
__magic_name__ : Dict = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__magic_name__ : Optional[Any] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
F' {self.tokenizer.model_max_length} tokens: {removed_text}' )
__magic_name__ : List[Any] = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
__magic_name__ : List[Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__magic_name__ ,__magic_name__ ,__magic_name__ : List[str] = text_embeddings.shape
__magic_name__ : int = text_embeddings.repeat(1 , lowerCAmelCase__ , 1 )
__magic_name__ : List[Any] = text_embeddings.view(bs_embed * num_images_per_prompt , lowerCAmelCase__ , -1 )
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier-free guidance.
__magic_name__ : Any = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__magic_name__ : List[str]
if negative_prompt is None:
__magic_name__ : str = [""""""]
elif type(lowerCAmelCase__ ) is not type(lowerCAmelCase__ ):
raise TypeError(
F'`negative_prompt` should be the same type to `prompt`, but got {type(lowerCAmelCase__ )} !='
F' {type(lowerCAmelCase__ )}.' )
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__magic_name__ : List[Any] = [negative_prompt]
elif batch_size != len(lowerCAmelCase__ ):
raise ValueError(
F'`negative_prompt`: {negative_prompt} has batch size {len(lowerCAmelCase__ )}, but `prompt`:'
F' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
""" the batch size of `prompt`.""" )
else:
__magic_name__ : Dict = negative_prompt
__magic_name__ : List[Any] = text_input_ids.shape[-1]
__magic_name__ : List[str] = self.tokenizer(
lowerCAmelCase__ , padding="""max_length""" , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors="""pt""" , )
__magic_name__ : List[Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__magic_name__ : List[Any] = uncond_embeddings.shape[1]
__magic_name__ : Dict = uncond_embeddings.repeat(lowerCAmelCase__ , lowerCAmelCase__ , 1 )
__magic_name__ : List[Any] = uncond_embeddings.view(batch_size * num_images_per_prompt , lowerCAmelCase__ , -1 )
        # For classifier-free guidance we need two predictions (unconditional and
        # text-conditioned). Concatenating both embeddings into a single batch
        # lets one forward pass produce both.
__magic_name__ : Optional[Any] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated on the target device
        # for 1-to-1 reproducibility with the CompVis implementation.
        # However, this currently doesn't work on `mps`.
__magic_name__ : Optional[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__magic_name__ : str = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
__magic_name__ : Union[str, Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__magic_name__ : Tuple = torch.randn(
lowerCAmelCase__ , generator=lowerCAmelCase__ , device="""cpu""" , dtype=lowerCAmelCase__ ).to(self.device )
__magic_name__ : Tuple = torch.randn(lowerCAmelCase__ , generator=lowerCAmelCase__ , device="""cpu""" , dtype=lowerCAmelCase__ ).to(
self.device )
else:
__magic_name__ : Union[str, Any] = torch.randn(
lowerCAmelCase__ , generator=lowerCAmelCase__ , device=self.device , dtype=lowerCAmelCase__ )
__magic_name__ : List[Any] = torch.randn(lowerCAmelCase__ , generator=lowerCAmelCase__ , device=self.device , dtype=lowerCAmelCase__ )
else:
if latents_reference.shape != latents_shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
__magic_name__ : int = latents_reference.to(self.device )
__magic_name__ : Union[str, Any] = latents.to(self.device )
            # This is the key part of the pipeline: it tries to ensure that images
            # generated with the same seed but at different sizes still come out
            # looking similar.
__magic_name__ : Any = (latents_shape[3] - latents_shape_reference[3]) // 2
__magic_name__ : str = (latents_shape[2] - latents_shape_reference[2]) // 2
__magic_name__ : Union[str, Any] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
__magic_name__ : Any = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
__magic_name__ : Optional[Any] = 0 if dx < 0 else dx
__magic_name__ : Optional[Any] = 0 if dy < 0 else dy
__magic_name__ : int = max(-dx , 0 )
__magic_name__ : Dict = max(-dy , 0 )
__magic_name__ : List[Any] = latents_reference[:, :, dy : dy + h, dx : dx + w]
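            # copy the overlapping center crop of the reference latents so that the
            # same seed rendered at a different resolution stays visually close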
# set timesteps
self.scheduler.set_timesteps(lowerCAmelCase__ )
# Some schedulers like PNDM have timesteps as arrays
        # It's more efficient to move all timesteps to the correct device beforehand
__magic_name__ : int = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__magic_name__ : Tuple = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__magic_name__ : Optional[int] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__magic_name__ : List[str] = {}
if accepts_eta:
__magic_name__ : List[Any] = eta
for i, t in enumerate(self.progress_bar(lowerCAmelCase__ ) ):
# expand the latents if we are doing classifier free guidance
__magic_name__ : Any = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__magic_name__ : Optional[Any] = self.scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
# predict the noise residual
__magic_name__ : Optional[Any] = self.unet(lowerCAmelCase__ , lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ ).sample
# perform guidance
if do_classifier_free_guidance:
__magic_name__ ,__magic_name__ : Tuple = noise_pred.chunk(2 )
__magic_name__ : Any = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
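                # classifier-free guidance: push the prediction away from the
                # unconditional output along the text-conditioned direction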
# compute the previous noisy sample x_t -> x_t-1
__magic_name__ : Any = self.scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : Optional[Any] = 1 / 0.1_8_2_1_5 * latents
__magic_name__ : Any = self.vae.decode(lowerCAmelCase__ ).sample
__magic_name__ : Any = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__magic_name__ : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
__magic_name__ : List[str] = self.feature_extractor(self.numpy_to_pil(lowerCAmelCase__ ) , return_tensors="""pt""" ).to(
self.device )
__magic_name__ ,__magic_name__ : List[Any] = self.safety_checker(
images=lowerCAmelCase__ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
__magic_name__ : str = None
if output_type == "pil":
__magic_name__ : Optional[Any] = self.numpy_to_pil(lowerCAmelCase__ )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=lowerCAmelCase__ , nsfw_content_detected=lowerCAmelCase__ )
| 324 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
__magic_name__: Optional[int] = logging.get_logger(__name__)
__magic_name__: List[Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__magic_name__: Optional[Any] = {
"vocab_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
),
"squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli": (
"https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
),
},
}
__magic_name__: List[Any] = {
"squeezebert/squeezebert-uncased": 512,
"squeezebert/squeezebert-mnli": 512,
"squeezebert/squeezebert-mnli-headless": 512,
}
__magic_name__: Union[str, Any] = {
"squeezebert/squeezebert-uncased": {"do_lower_case": True},
"squeezebert/squeezebert-mnli": {"do_lower_case": True},
"squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class snake_case__ ( _lowerCAmelCase ):
lowercase__ : Optional[int] = VOCAB_FILES_NAMES
lowercase__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : Tuple = PRETRAINED_INIT_CONFIGURATION
lowercase__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : Dict = SqueezeBertTokenizer
def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__="[UNK]" , lowerCAmelCase__="[SEP]" , lowerCAmelCase__="[PAD]" , lowerCAmelCase__="[CLS]" , lowerCAmelCase__="[MASK]" , lowerCAmelCase__=True , lowerCAmelCase__=None , **lowerCAmelCase__ , ) -> List[str]:
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , tokenize_chinese_chars=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ , **lowerCAmelCase__ , )
__magic_name__ : Dict = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , lowerCAmelCase__ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , lowerCAmelCase__ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , lowerCAmelCase__ ) != tokenize_chinese_chars
):
__magic_name__ : Any = getattr(lowerCAmelCase__ , normalizer_state.pop("""type""" ) )
__magic_name__ : Any = do_lower_case
__magic_name__ : List[str] = strip_accents
__magic_name__ : int = tokenize_chinese_chars
__magic_name__ : int = normalizer_class(**lowerCAmelCase__ )
__magic_name__ : Optional[int] = do_lower_case
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__=None ) -> List[Any]:
__magic_name__ : Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
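    # sequence pairs are intended to be serialized BERT-style as `[CLS] A [SEP] B [SEP]`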
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
__magic_name__ : int = [self.sep_token_id]
__magic_name__ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
__magic_name__ : Optional[Any] = self._tokenizer.model.save(lowerCAmelCase__ , name=lowerCAmelCase__ )
return tuple(lowerCAmelCase__ )
| 324 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case = {'''configuration_plbart''': ['''PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PLBartConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''PLBartTokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PLBartForCausalLM''',
'''PLBartForConditionalGeneration''',
'''PLBartForSequenceClassification''',
'''PLBartModel''',
'''PLBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 280 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class lowercase ( A__ , unittest.TestCase ):
"""simple docstring"""
_a = RoCBertTokenizer
_a = None
_a = False
_a = True
_a = filter_non_english
def lowerCAmelCase__ ( self ):
'''simple docstring'''
super().setUp()
UpperCamelCase__ :List[str] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''你''', '''好''', '''是''', '''谁''', '''a''', '''b''', '''c''', '''d''']
UpperCamelCase__ :Optional[Any] = {}
UpperCamelCase__ :str = {}
for i, value in enumerate(UpperCamelCase_ ):
UpperCamelCase__ :List[Any] = i
UpperCamelCase__ :List[str] = i
UpperCamelCase__ :Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCamelCase__ :Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_shape_file'''] )
UpperCamelCase__ :List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_pronunciation_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.word_shape_file , '''w''' , encoding='''utf-8''' ) as word_shape_writer:
json.dump(UpperCamelCase_ , UpperCamelCase_ , ensure_ascii=UpperCamelCase_ )
with open(self.word_pronunciation_file , '''w''' , encoding='''utf-8''' ) as word_pronunciation_writer:
json.dump(UpperCamelCase_ , UpperCamelCase_ , ensure_ascii=UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
UpperCamelCase__ :Optional[int] = tokenizer.tokenize('''你好[SEP]你是谁''' )
self.assertListEqual(UpperCamelCase_ , ['''你''', '''好''', '''[SEP]''', '''你''', '''是''', '''谁'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(UpperCamelCase_ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(UpperCamelCase_ ) , [5, 6, 2, 5, 7, 8] )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :int = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase_ , strip_accents=UpperCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :str = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase_ , strip_accents=UpperCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Any = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase_ , strip_accents=UpperCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase_ , strip_accents=UpperCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :str = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase_ , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
UpperCamelCase__ :List[str] = {}
for i, token in enumerate(UpperCamelCase_ ):
UpperCamelCase__ :Tuple = i
UpperCamelCase__ :Any = RoCBertWordpieceTokenizer(vocab=UpperCamelCase_ , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(UpperCamelCase_ ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
if self.test_rust_tokenizer:
UpperCamelCase__ :List[str] = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(UpperCamelCase_ ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
                tokens = tokenizer_r.encode_plus(
                    sentence , return_attention_mask=False , return_token_type_ids=False , return_offsets_mapping=True , add_special_tokens=True , )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r , '''do_lower_case''' ) else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
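        # Offsets are character spans into the raw input string: special tokens
        # ([CLS]/[SEP]/mask) get the degenerate span (0, 0), and in the
        # do_lower_case branch accent stripping merges "na ##ï ##ve" into a single
        # "naive" piece covering characters 3-8.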
    def test_change_tokenize_chinese_chars( self ):
'''simple docstring'''
        list_of_commun_chinese_char = ['''的''', '''人''', '''有''']
        text_with_chinese_char = ''.join(list_of_commun_chinese_char )
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                kwargs['''tokenize_chinese_chars'''] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p , list_of_commun_chinese_char )
                self.assertListEqual(tokens_without_spe_char_r , list_of_commun_chinese_char )
                kwargs['''tokenize_chinese_chars'''] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    F'''##{token}''' if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char )
                ]
                self.assertListEqual(tokens_without_spe_char_p , expected_tokens )
                self.assertListEqual(tokens_without_spe_char_r , expected_tokens )
@slow
    def test_sequence_builders( self ):
'''simple docstring'''
        tokenizer = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
        text = tokenizer.encode('''你好''' , add_special_tokens=False )
        text_a = tokenizer.encode('''你是谁''' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
    def test_prepare_for_model( self ):
'''simple docstring'''
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                string_sequence = '''你好,你是谁'''
                tokens = tokenizer.tokenize(string_sequence )
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens )
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens )
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens )
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids , tokens_shape_ids , tokens_proun_ids , add_special_tokens=True )
                input_dict = tokenizer.encode_plus(string_sequence , add_special_tokens=True )
                self.assertEqual(prepared_input_dict , input_dict )
| 280 | 1 |
"""simple docstring"""
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class __A ( FeatureExtractionSavingTestMixin ):
    # to overwrite at feature extractor specific tests
    feat_extract_tester = None
    feature_extraction_class = None

    @property
    def feat_extract_dict( self ):
        return self.feat_extract_tester.prepare_feat_extract_dict()

    def test_feat_extract_common_properties( self ):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        self.assertTrue(hasattr(feat_extract , """feature_size""" ) )
        self.assertTrue(hasattr(feat_extract , """sampling_rate""" ) )
        self.assertTrue(hasattr(feat_extract , """padding_value""" ) )

    def test_batch_feature( self ):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} )
        self.assertTrue(all(len(x ) == len(y ) for x, y in zip(speech_inputs , processed_features[input_name] ) ) )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True )
        processed_features = BatchFeature({input_name: speech_inputs} , tensor_type="""np""" )
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
    def test_batch_feature_pt( self ):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True )
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} , tensor_type="""pt""" )
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
    def test_batch_feature_tf( self ):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True )
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} , tensor_type="""tf""" )
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
    def _check_padding( self , numpify=False ):
        def _inputs_have_equal_length(input ):
            length = len(input[0] )
            for input_slice in input[1:]:
                if len(input_slice ) != length:
                    return False
            return True

        def _inputs_are_equal(input_1 , input_2 ):
            if len(input_1 ) != len(input_2 ):
                return False
            for input_slice_1, input_slice_2 in zip(input_1 , input_2 ):
                if not np.allclose(np.asarray(input_slice_1 ) , np.asarray(input_slice_2 ) , atol=1E-3 ):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify )
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} )
        pad_diff = self.feat_extract_tester.seq_length_diff
        pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff
        pad_min_length = self.feat_extract_tester.min_seq_length
        batch_size = self.feat_extract_tester.batch_size
        feature_size = self.feat_extract_tester.feature_size

        # test padding for List[int] + numpy
        input_1 = feat_extract.pad(processed_features , padding=False )
        input_1 = input_1[input_name]
        input_2 = feat_extract.pad(processed_features , padding="""longest""" )
        input_2 = input_2[input_name]
        input_3 = feat_extract.pad(processed_features , padding="""max_length""" , max_length=len(speech_inputs[-1] ) )
        input_3 = input_3[input_name]
        input_4 = feat_extract.pad(processed_features , padding="""longest""" , return_tensors="""np""" )
        input_4 = input_4[input_name]
        # max_length parameter has to be provided when setting `padding="max_length"`
        with self.assertRaises(ValueError ):
            feat_extract.pad(processed_features , padding="""max_length""" )[input_name]
        input_5 = feat_extract.pad(
            processed_features , padding="""max_length""" , max_length=pad_max_length , return_tensors="""np""" )
        input_5 = input_5[input_name]
        self.assertFalse(_inputs_have_equal_length(input_1 ) )
        self.assertTrue(_inputs_have_equal_length(input_4 ) )
        self.assertTrue(_inputs_have_equal_length(input_5 ) )
        self.assertTrue(_inputs_are_equal(input_4 , input_5 ) )
        self.assertTrue(len(input_1[0] ) == pad_min_length )
        self.assertTrue(len(input_1[1] ) == pad_min_length + pad_diff )
        self.assertTrue(input_4.shape[:2] == (batch_size, len(input_4[0] )) )
        self.assertTrue(input_5.shape[:2] == (batch_size, pad_max_length) )
        if feature_size > 1:
            self.assertTrue(input_4.shape[2] == input_5.shape[2] == feature_size )

        # test padding for `pad_to_multiple_of` for List[int] + numpy
        input_6 = feat_extract.pad(processed_features , pad_to_multiple_of=1_0 )
        input_6 = input_6[input_name]
        input_7 = feat_extract.pad(processed_features , padding="""longest""" , pad_to_multiple_of=1_0 )
        input_7 = input_7[input_name]
        input_8 = feat_extract.pad(
            processed_features , padding="""max_length""" , pad_to_multiple_of=1_0 , max_length=pad_max_length )
        input_8 = input_8[input_name]
        input_9 = feat_extract.pad(
            processed_features , padding="""max_length""" , pad_to_multiple_of=1_0 , max_length=pad_max_length , return_tensors="""np""" , )
        input_9 = input_9[input_name]
        self.assertTrue(all(len(x ) % 1_0 == 0 for x in input_6 ) )
        self.assertTrue(_inputs_are_equal(input_6 , input_7 ) )
        expected_mult_pad_length = pad_max_length if pad_max_length % 1_0 == 0 else (pad_max_length // 1_0 + 1) * 1_0
        self.assertTrue(all(len(x ) == expected_mult_pad_length for x in input_8 ) )
        self.assertEqual(input_9.shape[:2] , (batch_size, expected_mult_pad_length) )
        if feature_size > 1:
            self.assertTrue(input_9.shape[2] == feature_size )

        # Check padding value is correct
        padding_vector_sum = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
        self.assertTrue(
            abs(np.asarray(input_2[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
            < 1E-3 )
        self.assertTrue(
            abs(
                np.asarray(input_2[1] )[pad_min_length + pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
            < 1E-3 )
        self.assertTrue(
            abs(
                np.asarray(input_2[2] )[pad_min_length + 2 * pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
            < 1E-3 )
        self.assertTrue(
            abs(input_4[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 )
        self.assertTrue(
            abs(input_9[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
            < 1E-3 )
    def _check_truncation( self , numpify=False ):
        def _inputs_have_equal_length(input ):
            length = len(input[0] )
            for input_slice in input[1:]:
                if len(input_slice ) != length:
                    return False
            return True

        def _inputs_are_equal(input_1 , input_2 ):
            if len(input_1 ) != len(input_2 ):
                return False
            for input_slice_1, input_slice_2 in zip(input_1 , input_2 ):
                if not np.allclose(np.asarray(input_slice_1 ) , np.asarray(input_slice_2 ) , atol=1E-3 ):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify )
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} )

        # truncate to smallest
        input_1 = feat_extract.pad(
            processed_features , padding="""max_length""" , max_length=len(speech_inputs[0] ) , truncation=True )
        input_1 = input_1[input_name]
        input_2 = feat_extract.pad(processed_features , padding="""max_length""" , max_length=len(speech_inputs[0] ) )
        input_2 = input_2[input_name]
        self.assertTrue(_inputs_have_equal_length(input_1 ) )
        self.assertFalse(_inputs_have_equal_length(input_2 ) )

        # truncate to smallest with np
        input_3 = feat_extract.pad(
            processed_features , padding="""max_length""" , max_length=len(speech_inputs[0] ) , return_tensors="""np""" , truncation=True , )
        input_3 = input_3[input_name]
        input_4 = feat_extract.pad(
            processed_features , padding="""max_length""" , max_length=len(speech_inputs[0] ) , return_tensors="""np""" )
        input_4 = input_4[input_name]
        self.assertTrue(_inputs_have_equal_length(input_3 ) )
        self.assertTrue(input_3.shape[1] == len(speech_inputs[0] ) )
        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_4 ) )

        # truncate to middle
        input_5 = feat_extract.pad(
            processed_features , padding="""max_length""" , max_length=len(speech_inputs[1] ) , truncation=True , return_tensors="""np""" , )
        input_5 = input_5[input_name]
        input_6 = feat_extract.pad(
            processed_features , padding="""max_length""" , max_length=len(speech_inputs[1] ) , truncation=True )
        input_6 = input_6[input_name]
        input_7 = feat_extract.pad(
            processed_features , padding="""max_length""" , max_length=len(speech_inputs[1] ) , return_tensors="""np""" )
        input_7 = input_7[input_name]
        self.assertTrue(input_5.shape[1] == len(speech_inputs[1] ) )
        self.assertTrue(_inputs_have_equal_length(input_5 ) )
        self.assertTrue(_inputs_have_equal_length(input_6 ) )
        self.assertTrue(_inputs_are_equal(input_5 , input_6 ) )
        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_7 ) )
        self.assertTrue(len(input_7[-1] ) == len(speech_inputs[-1] ) )

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError ):
            feat_extract.pad(processed_features , truncation=True )[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError ):
            feat_extract.pad(processed_features , padding="""longest""" , truncation=True )[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError ):
            feat_extract.pad(processed_features , padding="""longest""" , truncation=True )[input_name]

        # max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
        with self.assertRaises(ValueError ):
            feat_extract.pad(processed_features , padding="""max_length""" , truncation=True )[input_name]

        # test truncation for `pad_to_multiple_of` for List[int] + numpy
        pad_to_multiple_of = 1_2
        input_8 = feat_extract.pad(
            processed_features , padding="""max_length""" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=pad_to_multiple_of , truncation=True , )
        input_8 = input_8[input_name]
        input_9 = feat_extract.pad(
            processed_features , padding="""max_length""" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=pad_to_multiple_of , )
        input_9 = input_9[input_name]
        # retrieve expected_length as multiple of pad_to_multiple_of
        expected_length = len(speech_inputs[0] )
        if expected_length % pad_to_multiple_of != 0:
            expected_length = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
        self.assertTrue(len(input_8[0] ) == expected_length )
        self.assertTrue(_inputs_have_equal_length(input_8 ) )
        self.assertFalse(_inputs_have_equal_length(input_9 ) )
    def test_padding_from_list( self ):
        self._check_padding(numpify=False )

    def test_padding_from_array( self ):
        self._check_padding(numpify=True )

    def test_truncation_from_list( self ):
        self._check_truncation(numpify=False )

    def test_truncation_from_array( self ):
        self._check_truncation(numpify=True )
@require_torch
    def test_padding_accepts_tensors_pt( self ):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} )
        input_np = feat_extract.pad(processed_features , padding="""longest""" , return_tensors="""np""" )[input_name]
        input_pt = feat_extract.pad(processed_features , padding="""longest""" , return_tensors="""pt""" )[input_name]
        self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_pt.numpy().astype(np.float32 ).sum() ) < 1E-2 )
@require_tf
    def test_padding_accepts_tensors_tf( self ):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} )
        input_np = feat_extract.pad(processed_features , padding="""longest""" , return_tensors="""np""" )[input_name]
        input_tf = feat_extract.pad(processed_features , padding="""longest""" , return_tensors="""tf""" )[input_name]
        self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_tf.numpy().astype(np.float32 ).sum() ) < 1E-2 )
    def test_attention_mask( self ):
        feat_dict = self.feat_extract_dict
        feat_dict["""return_attention_mask"""] = True
        feat_extract = self.feature_extraction_class(**feat_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x ) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs} )
        processed = feat_extract.pad(processed , padding="""longest""" , return_tensors="""np""" )
        self.assertIn("""attention_mask""" , processed )
        self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
        self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , input_lengths )

    def test_attention_mask_with_truncation( self ):
        feat_dict = self.feat_extract_dict
        feat_dict["""return_attention_mask"""] = True
        feat_extract = self.feature_extraction_class(**feat_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x ) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs} )
        max_length = min(input_lengths )
        processed_pad = feat_extract.pad(
            processed , padding="""max_length""" , max_length=max_length , truncation=True , return_tensors="""np""" )
        self.assertIn("""attention_mask""" , processed_pad )
        self.assertListEqual(
            list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
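# Minimal usage sketch (the concrete class and tester names below are hypothetical,
# not part of this file): a model-specific test mixes this mixin with
# unittest.TestCase and fills in the two class attributes, e.g.
#
#     class MyFeatureExtractionTest(__A, unittest.TestCase):
#         feature_extraction_class = MyFeatureExtractor  # assumed extractor class
#
#         def setUp(self):
#             self.feat_extract_tester = MyFeatureExtractionTester(self)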
| 96 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"""configuration_vit_mae""": ["""VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMAEConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_vit_mae"""] = [
"""VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMAEForPreTraining""",
"""ViTMAELayer""",
"""ViTMAEModel""",
"""ViTMAEPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_vit_mae"""] = [
"""TFViTMAEForPreTraining""",
"""TFViTMAEModel""",
"""TFViTMAEPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
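# With this lazy structure, `from <package> import ViTMAEModel` resolves through
# _LazyModule and only imports the heavy torch (or TF) submodule on first attribute
# access; during static type checking the eager imports in the TYPE_CHECKING branch
# are used instead.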
| 67 | 0 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = 'examples/'
REPLACE_PATTERNS = {
    'examples': (re.compile(R'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    'init': (re.compile(R'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    'setup': (re.compile(R'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), R'\1version="VERSION",'),
    'doc': (re.compile(R'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    'init': 'src/transformers/__init__.py',
    'setup': 'setup.py',
}
README_FILE = 'README.md'


def update_version_in_file(fname, version, pattern):
    """Update the version in one file, using the replace pattern registered for it."""
    with open(fname, 'r', encoding='utf-8', newline='\n') as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace('VERSION', version)
    code = re_pattern.sub(replace, code)
    with open(fname, 'w', encoding='utf-8', newline='\n') as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all example files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if 'research_projects' in directories:
            directories.remove('research_projects')
        if 'legacy' in directories:
            directories.remove('legacy')
        for fname in fnames:
            if fname.endswith('.py'):
                update_version_in_file(os.path.join(folder, fname), version, pattern='examples')


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = '🤗 Transformers currently provides the following architectures'
    _end_prompt = '1. Want to contribute a new model?'
    with open(README_FILE, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith('1.'):
            lines[index] = lines[index].replace(
                'https://huggingface.co/docs/transformers/main/model_doc', 'https://huggingface.co/docs/transformers/model_doc', )
        index += 1
    with open(README_FILE, 'w', encoding='utf-8', newline='\n') as f:
        f.writelines(lines)


def get_version():
    """Read the current version from the package __init__."""
    with open(REPLACE_FILES['init'], 'r') as f:
        code = f.read()
    default_version = REPLACE_PATTERNS['init'][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = F"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = F"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(F"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(F"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = F"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(F"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(F"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
    parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print('Nothing to do after a patch :-)')
    else:
        post_release_work()
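# Typical invocations (assuming the script is saved as utils/release.py, as in the
# transformers repo; the path itself is not fixed by this file):
#   python utils/release.py                 # pre-release: bump the version everywhere
#   python utils/release.py --patch         # pre-release for a patch version
#   python utils/release.py --post_release  # move back to a .dev0 development version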
| 35 |
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = 'CompVis/stable-diffusion-v1-1'
pipe2_model_id = 'CompVis/stable-diffusion-v1-2'
pipe3_model_id = 'CompVis/stable-diffusion-v1-3'
pipe4_model_id = 'CompVis/stable-diffusion-v1-4'
class a_ ( DiffusionPipeline ):
    def __init__( self , vae: AutoencoderKL , text_encoder: CLIPTextModel , tokenizer: CLIPTokenizer , unet: UNet2DConditionModel , scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , safety_checker: StableDiffusionSafetyChecker , feature_extractor: CLIPImageProcessor , requires_safety_checker: bool = True , ):
        """simple docstring"""
        super().__init__()
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id )
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id )
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id )
        self.pipe4 = StableDiffusionPipeline(
            vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , requires_safety_checker=requires_safety_checker , )
        self.register_modules(pipeline1=self.pipe1 , pipeline2=self.pipe2 , pipeline3=self.pipe3 , pipeline4=self.pipe4 )
    @property
    def layers( self ) -> Dict[str, Any]:
        """simple docstring"""
        return {k: getattr(self , k ) for k in self.config.keys() if not k.startswith('_' )}
    def enable_attention_slicing( self , slice_size: Optional[Union[str, int]] = 'auto' ):
        """simple docstring"""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )
    def disable_attention_slicing( self ):
        """simple docstring"""
        self.enable_attention_slicing(None )
    @torch.no_grad()
    def text2img_sd1_1( self , prompt: Union[str, List[str]] , height: int = 512 , width: int = 512 , num_inference_steps: int = 50 , guidance_scale: float = 7.5 , negative_prompt: Optional[Union[str, List[str]]] = None , num_images_per_prompt: Optional[int] = 1 , eta: float = 0.0 , generator: Optional[torch.Generator] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = 'pil' , return_dict: bool = True , callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps: int = 1 , **kwargs , ):
        """simple docstring"""
        return self.pipe1(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
    @torch.no_grad()
    def text2img_sd1_2( self , prompt: Union[str, List[str]] , height: int = 512 , width: int = 512 , num_inference_steps: int = 50 , guidance_scale: float = 7.5 , negative_prompt: Optional[Union[str, List[str]]] = None , num_images_per_prompt: Optional[int] = 1 , eta: float = 0.0 , generator: Optional[torch.Generator] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = 'pil' , return_dict: bool = True , callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps: int = 1 , **kwargs , ):
        """simple docstring"""
        return self.pipe2(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
    @torch.no_grad()
    def text2img_sd1_3( self , prompt: Union[str, List[str]] , height: int = 512 , width: int = 512 , num_inference_steps: int = 50 , guidance_scale: float = 7.5 , negative_prompt: Optional[Union[str, List[str]]] = None , num_images_per_prompt: Optional[int] = 1 , eta: float = 0.0 , generator: Optional[torch.Generator] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = 'pil' , return_dict: bool = True , callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps: int = 1 , **kwargs , ):
        """simple docstring"""
        return self.pipe3(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
    @torch.no_grad()
    def text2img_sd1_4( self , prompt: Union[str, List[str]] , height: int = 512 , width: int = 512 , num_inference_steps: int = 50 , guidance_scale: float = 7.5 , negative_prompt: Optional[Union[str, List[str]]] = None , num_images_per_prompt: Optional[int] = 1 , eta: float = 0.0 , generator: Optional[torch.Generator] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = 'pil' , return_dict: bool = True , callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps: int = 1 , **kwargs , ):
        """simple docstring"""
        return self.pipe4(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
    @torch.no_grad()
    def __call__( self , prompt: Union[str, List[str]] , height: int = 512 , width: int = 512 , num_inference_steps: int = 50 , guidance_scale: float = 7.5 , negative_prompt: Optional[Union[str, List[str]]] = None , num_images_per_prompt: Optional[int] = 1 , eta: float = 0.0 , generator: Optional[torch.Generator] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = 'pil' , return_dict: bool = True , callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps: int = 1 , **kwargs , ):
        """simple docstring"""
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.to(device )
        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F"`height` and `width` must be divisible by 8 but are {height} and {width}." )
        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]] )
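# Rough usage sketch (the custom_pipeline name is an assumption based on the diffusers
# community-pipeline convention, not something this file defines):
#   pipe = DiffusionPipeline.from_pretrained(
#       pipe4_model_id, custom_pipeline="stable_diffusion_comparison")
#   images = pipe(prompt="an astronaut riding a horse").images  # one image per v1 checkpoint
# Note that __init__ downloads all four v1 checkpoints, so this class is memory-hungry.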
| 35 | 1 |
"""simple docstring"""
import os
def solution() -> int:
    script_dir = os.path.dirname(os.path.realpath(__file__ ) )
    triangle_path = os.path.join(script_dir , "triangle.txt" )
    with open(triangle_path ) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" " ):
            numbers_from_line.append(int(number ) )
        a.append(numbers_from_line )
    for i in range(1 , len(a ) ):
        for j in range(len(a[i] ) ):
            number1 = a[i - 1][j] if j != len(a[i - 1] ) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1 , number2 )
    return max(a[-1] )
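# The triangle is reduced in place, top-down: after the loops each a[i][j] holds the
# best path sum from the apex to that cell, so the answer is max(a[-1]). This is
# linear in the number of triangle entries, versus the 2^(rows-1) distinct
# brute-force paths.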
if __name__ == "__main__":
print(solution())
| 346 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case ( SchedulerCommonTest ):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config( self , **kwargs ):
        config = {
            "num_train_timesteps": 1_0_0_0,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs )
        return config
    def test_timesteps( self ):
        for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=timesteps )

    def test_betas( self ):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )

    def test_schedules( self ):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )

    def test_variance_type( self ):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance )

    def test_clip_sample( self ):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )

    def test_thresholding( self ):
        self.check_over_configs(thresholding=False )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , )

    def test_prediction_type( self ):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )

    def test_time_indices( self ):
        for t in [0, 5_0_0, 9_9_9]:
            self.check_over_forward(time_step=t )
    def test_variance( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7) - 0.00979)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9) - 0.02)) < 1e-5
    def test_full_loop_no_noise( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 258.9606) < 1e-2
assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction" )
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 202.0296) < 1e-2
assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [1_0_0, 8_7, 5_0, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps )
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps ):
            if i == len(scheduler_timesteps ) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep )
            prev_t = prev_t.item()
            self.assertEqual(prev_t , expected_prev_t )
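    # For a custom timestep list, previous_timestep(t) simply returns the next entry
    # of the user-supplied descending schedule (or -1 after the final step), instead
    # of the uniform t - num_train_timesteps // num_inference_steps used otherwise.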
    def test_custom_timesteps_increasing_order( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [1_0_0, 8_7, 5_0, 5_1, 0]
        with self.assertRaises(ValueError , msg="`custom_timesteps` must be in descending order." ):
            scheduler.set_timesteps(timesteps=timesteps )

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [1_0_0, 8_7, 5_0, 1, 0]
        num_inference_steps = len(timesteps )
        with self.assertRaises(ValueError , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps )

    def test_custom_timesteps_too_large( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError ,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ):
            scheduler.set_timesteps(timesteps=timesteps )
| 346 | 1 |
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_results(output_dir ):
    results = {}
    path = os.path.join(output_dir, """all_results.json""" )
    if os.path.exists(path ):
        with open(path, """r""" ) as f:
            results = json.load(f )
    else:
        raise ValueError(f"""can't find {path}""" )
    return results
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class __a ( TestCasePlus ):
"""simple docstring"""
    def test_run_glue( self ):
'''simple docstring'''
import xla_spawn
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
        with patch.object(sys ,"""argv""" ,testargs ):
            start = time()
            xla_spawn.main()
            end = time()
            result = get_results(tmp_dir )
self.assertGreaterEqual(result["""eval_accuracy"""] ,0.75 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start ,5_0_0 )
    def test_trainer_tpu( self ):
'''simple docstring'''
import xla_spawn
        testargs = """
./tests/test_trainer_tpu.py
--num_cores=8
./tests/test_trainer_tpu.py
""".split()
        with patch.object(sys ,"""argv""" ,testargs ):
xla_spawn.main()
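# Both tests drive the examples' xla_spawn.py launcher, the TPU analogue of
# torch.distributed.launch: it consumes --num_cores and re-launches the target
# script once per TPU core via torch_xla.distributed.xla_multiprocessing.spawn.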
| 588 |
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float ) -> float:
    """Compute the speed of sound in a fluid as c = sqrt(bulk_modulus / density)."""
    if density <= 0:
        raise ValueError("""Impossible fluid density""" )
    if bulk_modulus <= 0:
        raise ValueError("""Impossible bulk modulus""" )
    return (bulk_modulus / density) ** 0.5
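# Quick sanity check (approximate values, for illustration only): water at ~20 °C has
# density ≈ 998 kg/m^3 and bulk modulus ≈ 2.15e9 Pa, so
# speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9) ≈ 1.47e3 m/s.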
if __name__ == "__main__":
import doctest
doctest.testmod()
| 588 | 1 |
'''simple docstring'''
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    """Checks whether n is a 9-digit 1-to-9 pandigital number."""
    digits = str(n)
    return len(digits) == 9 and set(digits) == set('123456789')


def solution() -> int | None:
    """Finds the largest 1-to-9 pandigital number formed as a concatenated product."""
    for base_num in range(9999 , 4999 , -1 ):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate ):
            return candidate
    for base_num in range(333 , 99 , -1 ):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate ):
            return candidate
    return None
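# Why these two ranges suffice (Project Euler 38): a 4-digit n concatenated with 2n
# is the 9-digit number n * 100002, and a 3-digit n concatenated with 2n and 3n is
# n * 1002003. The largest pandigital hit is 932718654 = 9327 * 100002 (9327 & 18654).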
if __name__ == "__main__":
print(F"""{solution() = }""")
| 394 |
'''simple docstring'''
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
'word_embeddings_layernorm.weight',
'word_embeddings_layernorm.bias',
'input_layernorm.weight',
'input_layernorm.bias',
'post_attention_layernorm.weight',
'post_attention_layernorm.bias',
'self_attention.dense.bias',
'mlp.dense_4h_to_h.bias',
'ln_f.weight',
'ln_f.bias',
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
'mlp.dense_4h_to_h.weight',
'self_attention.dense.weight',
]
def layer_name_mapping(key, file):
    """Convert Megatron-DeepSpeed TP/PP weight names to the transformers naming."""
    # Handle first and last layers
    layer_rename_map = {
        'word_embeddings.weight': 'word_embeddings.weight',
        'word_embeddings.norm.weight': 'word_embeddings_layernorm.weight',
        'word_embeddings.norm.bias': 'word_embeddings_layernorm.bias',
        'weight': 'ln_f.weight',
        'bias': 'ln_f.bias',
    }
    if key in layer_rename_map:
        return layer_rename_map[key]
    # Handle transformer blocks
    layer_number = int(re.match(R'.*layer_(\d*).*' , file )[1] )
    layer_number -= 3
    return f'''h.{layer_number}.''' + key
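# Illustrative mapping (the file name below is made up for the example): embedding and
# norm weights occupy the first checkpoint shards, so transformer-block files are
# offset by 3, e.g.
# layer_name_mapping('self_attention.dense.weight', 'layer_06-model_00-model_states.pt')
# returns 'h.3.self_attention.dense.weight'.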
def get_dtype_size(dtype):
    """Return the size in bytes of one element of the given torch dtype."""
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(R'[^\d](\d+)$' , str(dtype ) )
    if bit_search is None:
        raise ValueError(f'''`dtype` is not a valid dtype: {dtype}.''' )
    bit_size = int(bit_search.groups()[0] )
    return bit_size // 8
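# e.g. get_dtype_size(torch.float16) -> 2 and get_dtype_size(torch.bool) -> 0.125:
# the bit width is parsed from the dtype's string form and divided by 8.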
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)
    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith('layer') and "model_00" in s, file_names))
        index_dict = {'weight_map': {}, 'metadata': {}}
        total_size = 0
        missing_keys = None
        config = BloomConfig()
        for j, file in enumerate(file_names):
            print('Processing file: {}'.format(file))
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace('model_00', f'''model_0{i}''')
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location='cpu')
                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)
                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)
            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path, 'pytorch_model_{}-of-{}.bin'.format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)), ), )
            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = 'pytorch_model_{}-of-{}.bin'.format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5))
        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + '.index.json'), 'w', encoding='utf-8') as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + '\n'
            f.write(json_config)
    else:
        model = BloomModel(config)
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith('layer') and "model_00" in s, file_names))
        missing_keys = None
        for i, file in enumerate(file_names):
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace('model_00', f'''model_0{i}''')
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location='cpu')
                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)
                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)
            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f'''The keys {other_keys.unexpected_keys} are unexpected'''
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))
        assert not missing_keys, f'''The keys {missing_keys} are missing'''
        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
        print(f'''Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}''')
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f'''Save configuration file to {pytorch_config_dump_path}''')
        with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bloom_checkpoint_path',
default=None,
type=str,
required=True,
help='Path to the Megatron-LM checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--bloom_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--shard_model',
action='store_true',
help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint',
)
parser.add_argument(
'--pretraining_tp',
default=4,
type=int,
help='Pretraining TP rank that has been used when training the model in Megatron-LM \n',
)
    args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
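
# Hypothetical invocation (paths below are placeholders, not from this repo):
#   python convert_bloom_original_checkpoint_to_pytorch.py \
#       --bloom_checkpoint_path /path/to/megatron_checkpoints \
#       --pytorch_dump_folder_path /path/to/output \
#       --pretraining_tp 4 \
#       --shard_model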
| 350 | 0 |
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
) -> None:
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})")
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})")
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: str, aliases: Optional[List[str]] = None
) -> None:
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
    _torch_error = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
_register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
    _tf_error = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
_register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, 'jax', aliases=[])
else:
    _jax_error = ValueError('JAX needs to be installed to be able to return JAX arrays.')
_register_unavailable_formatter(_jax_error, 'jax', aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
        raise ValueError(
            f"Return type should be None or selected in {list(fmt for fmt in _FORMAT_TYPES.keys() if fmt is not None)}, but got '{format_type}'")
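
# Illustrative usage sketch (assumes this module is imported from within `datasets`):
#   get_formatter("np") and get_formatter("numpy") both resolve, via
#   get_format_type_from_alias, to a NumpyFormatter instance, while asking for
#   "torch" without PyTorch installed re-raises the _torch_error registered above.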
| 703 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
| 83 | 0 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 530 |
"""simple docstring"""
def is_even(number: int) -> bool:
    """
    Return True if `number` is even, tested with a bitwise AND on the lowest bit.

    >>> is_even(2)
    True
    >>> is_even(7)
    False
    """
    return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 530 | 1 |
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)


class BarkProcessor(ProcessorMixin):
    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]

    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }

    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)
        self.speaker_embeddings = speaker_embeddings

    @classmethod
    def from_pretrained(cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path,
                speaker_embeddings_dict_path,
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if speaker_embeddings_path is None:
                logger.warning(
                    f"`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not exist, "
                    "no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json "
                    "dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`."
                )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None

        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)
        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)

    def save_pretrained(self, save_directory, speaker_embeddings_dict_path="speaker_embeddings_path.json", speaker_embeddings_directory="speaker_embeddings", push_to_hub: bool = False, **kwargs):
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory, speaker_embeddings_directory, "v2"), exist_ok=True)

            embeddings_dict = {}
            embeddings_dict["repo_or_path"] = save_directory

            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key)

                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict["repo_or_path"], speaker_embeddings_directory, f"{prompt_key}_{key}"
                            ),
                            voice_preset[key],
                            allow_pickle=False,
                        )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory, f"{prompt_key}_{key}.npy")

                    embeddings_dict[prompt_key] = tmp_dict

            with open(os.path.join(save_directory, speaker_embeddings_dict_path), "w") as fp:
                json.dump(embeddings_dict, fp)

        super().save_pretrained(save_directory, push_to_hub, **kwargs)

    def _load_voice_preset(self, voice_preset: str = None, **kwargs):
        voice_preset_paths = self.speaker_embeddings[voice_preset]

        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]."
                )

            path = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path", "/"),
                voice_preset_paths[key],
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if path is None:
                raise ValueError(
                    f"`{os.path.join(self.speaker_embeddings.get('repo_or_path', '/'), voice_preset_paths[key])}` does not exist, "
                    f"no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset} embeddings."
                )

            voice_preset_dict[key] = np.load(path)

        return voice_preset_dict

    def _validate_voice_preset_dict(self, voice_preset: Optional[dict] = None):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f"Voice preset unrecognized, missing {key} as a key.")

            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

    def __call__(self, text=None, voice_preset=None, return_tensors="pt", max_length=256, add_special_tokens=False, return_attention_mask=True, return_token_type_ids=False, **kwargs):
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)
            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith(".npz"):
                    voice_preset = voice_preset + ".npz"
                voice_preset = np.load(voice_preset)

        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)

        encoded_text = self.tokenizer(
            text,
            return_tensors=return_tensors,
            padding="max_length",
            max_length=max_length,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            add_special_tokens=add_special_tokens,
            **kwargs,
        )

        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset

        return encoded_text
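
# Hedged usage sketch (checkpoint and preset ids below are illustrative):
#   processor = BarkProcessor.from_pretrained("suno/bark-small")
#   inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
#   # `inputs` holds the tokenized text plus a "history_prompt" BatchFeature.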
| 711 |
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class CvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))


class CvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = CvtModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = CvtModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class CvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9285, 0.9015, -0.3150]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 203 | 0 |
"""simple docstring"""
from ...utils import (
    OptionalDependencyNotAvailable,
    is_note_seq_available,
    is_torch_available,
    is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
        T5FilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 626 |
"""simple docstring"""
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """
    Circular convolution of two discrete signals, built from a circulant
    matrix of rotations of the second signal.
    """

    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)

        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
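
# Worked example (added for illustration, computed by hand): circularly
# convolving [2, 1, 2, -1] with [1, 2, 3, 4] gives
#   CircularConvolution().circular_convolution() -> [10.0, 10.0, 6.0, 14.0]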
if __name__ == "__main__":
doctest.testmod()
| 626 | 1 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=['RobertaPreLayerNormForMaskedLM'])

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename='pytorch_model.bin'))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith('roberta.'):
            tensor_key = 'roberta_prelayernorm.' + tensor_key[len('roberta.'):]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith('.self.LayerNorm.weight') or tensor_key.endswith('.self.LayerNorm.bias'):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict)
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint-repo',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 230 |
def manhattan_distance(point_a: list, point_b: list) -> float:
    """
    Expects two lists of numbers and returns the Manhattan (taxicab) distance
    between them.

    >>> manhattan_distance([1, 1], [2, 2])
    2.0
    >>> manhattan_distance([1.5, 2], [3, 2])
    1.5
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError('Both points must be in the same n-dimensional space')

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        'Expected a list of numbers as input, found '
                        f'{type(item).__name__}'
                    )
                    raise TypeError(msg)
        else:
            msg = f'Expected a list of numbers as input, found {type(point).__name__}'
            raise TypeError(msg)
    else:
        raise ValueError('Missing an input')


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError('Both points must be in the same n-dimensional space')

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
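
# Illustrative check (value computed by hand): both implementations agree, e.g.
#   manhattan_distance([0, 0, 0], [1, 2, 3]) == manhattan_distance_one_liner([0, 0, 0], [1, 2, 3]) == 6.0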
if __name__ == "__main__":
import doctest
doctest.testmod()
| 230 | 1 |
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
    """Deterministic Miller-Rabin primality test for n below the last bound."""
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3_3170_4406_4679_8873_8596_1981 and not allow_probable:
        raise ValueError(
            "Warning: upper bound of deterministic test is exceeded. "
            "Pass allow_probable=True to allow probabilistic test. "
            "A return value of True indicates a probable prime.")
    # array bounds provided by analysis
    bounds = [
        2047,
        137_3653,
        2532_6001,
        32_1503_1751,
        2_1523_0289_8747,
        3_4747_4966_0383,
        341_5500_7172_8321,
        1,
        382_5123_0565_4641_3051,
        1,
        1,
        3186_6585_7834_0311_5116_7461,
        3_3170_4406_4679_8873_8596_1981,
    ]

    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n -1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False
    return True
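
# Worked decomposition example (for illustration): for n = 561,
# n - 1 = 560 = 2**4 * 35, so the while-loop above ends with s = 4 and d = 35.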
def test_miller_rabin() -> None:
"""simple docstring"""
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(83_8201 )
assert miller_rabin(83_8207 )
# 1_373_653
assert not miller_rabin(1731_6001 )
assert miller_rabin(1731_6017 )
# 25_326_001
assert not miller_rabin(30_7838_6641 )
assert miller_rabin(30_7838_6653 )
# 3_215_031_751
assert not miller_rabin(1_7130_4557_4801 )
assert miller_rabin(1_7130_4557_4819 )
# 2_152_302_898_747
assert not miller_rabin(2_7797_9972_8307 )
assert miller_rabin(2_7797_9972_8327 )
# 3_474_749_660_383
assert not miller_rabin(113_8500_2390_9441 )
assert miller_rabin(113_8500_2390_9527 )
# 341_550_071_728_321
assert not miller_rabin(127_5041_0188_4880_4351 )
assert miller_rabin(127_5041_0188_4880_4391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(796_6646_4458_5077_8779_1867 )
assert miller_rabin(796_6646_4458_5077_8779_1951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5528_4067_7446_6478_9766_0333 )
assert miller_rabin(5528_4067_7446_6478_9766_0359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 154 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''xlm-mlm-en-2048''': '''https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json''',
'''xlm-mlm-ende-1024''': '''https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-enfr-1024''': '''https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json''',
'''xlm-mlm-enro-1024''': '''https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json''',
'''xlm-mlm-tlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json''',
'''xlm-mlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json''',
'''xlm-clm-enfr-1024''': '''https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json''',
'''xlm-clm-ende-1024''': '''https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-17-1280''': '''https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json''',
'''xlm-mlm-100-1280''': '''https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json''',
}

class XLMConfig(PretrainedConfig):
    model_type = "xlm"
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }

    def __init__(
        self,
        vocab_size=30145,
        emb_dim=2048,
        n_layers=12,
        n_heads=16,
        dropout=0.1,
        attention_dropout=0.1,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=1,
        use_lang_emb=True,
        max_position_embeddings=512,
        embed_init_std=2048**-0.5,
        layer_norm_eps=1e-12,
        init_std=0.02,
        bos_index=0,
        eos_index=1,
        pad_index=2,
        unk_index=3,
        mask_index=5,
        is_encoder=True,
        summary_type="first",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        mask_token_id=0,
        lang_id=0,
        pad_token_id=2,
        bos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id

        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)

class XLMOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
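
# Minimal usage sketch (illustrative): `attribute_map` exposes standard config
# names, so `XLMConfig(emb_dim=128).hidden_size` returns 128.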
| 154 | 1 |
"""simple docstring"""
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--big_bird_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
| 93 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 93 | 1 |
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    """
    sample (`torch.FloatTensor` of shape `(batch_size * num_frames, num_channels, height, width)`):
        The hidden states output conditioned on `encoder_hidden_states` input.
    """

    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        activation_fn: str = "geglu",
        norm_elementwise_affine: bool = True,
        double_self_attention: bool = True,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    double_self_attention=double_self_attention,
                    norm_elementwise_affine=norm_elementwise_affine,
                )
                for d in range(num_layers)
            ]
        )

        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, class_labels=None, num_frames=1, cross_attention_kwargs=None, return_dict: bool = True):
        # 1. Input: fold the frame axis out of the batch axis
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states

        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
            )

        # 3. Output: project back and restore the original shape
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)
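
# Shape sketch (values illustrative only, not asserted by this module): with
# in_channels=32 the block maps hidden_states of shape
# (batch * num_frames, 32, height, width) through temporal attention and back
# to the same shape, e.g.
#   model = TransformerTemporalModel(in_channels=32)
#   out = model(torch.randn(2 * 4, 32, 8, 8), num_frames=4).sample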
| 481 |
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs


@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 198 | 0 |
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
_UpperCamelCase = "http://www.mocksite.com/file1.txt"
_UpperCamelCase = "\"text\": [\"foo\", \"foo\"]"
_UpperCamelCase = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class __lowercase :
_UpperCamelCase = 200
_UpperCamelCase = {"""Content-Length""": """100"""}
_UpperCamelCase = {}
def UpperCamelCase__ ( self , **A_ ) ->str:
'''simple docstring'''
return [bytes(A_ , '''utf-8''' )]
def _lowercase ( *lowercase__ , **lowercase__ ):
return MockResponse()
@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager_download(urls_type, tmp_path, monkeypatch):
    import requests

    monkeypatch.setattr(requests, "request", mock_request)

    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}


@pytest.mark.parametrize("paths_type", [str, list, dict])
def test_download_manager_extract(paths_type, xz_file, text_file):
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir,
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content


def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4


@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2


@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2


def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
| 583 |
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class __lowercase :
def __init__( self , A_ , A_=16 , A_=13 , A_=7 , A_=14 , A_=10 , A_=19 , A_=5 , A_=4 , A_=True , A_=16 , A_=2 , A_=4 , A_=4 , A_="gelu" , A_=0.1 , A_=0.1 , A_=[1, 2, 3, 4, 5] , A_=25 , A_=5 , ) ->Tuple:
'''simple docstring'''
__lowerCAmelCase : Optional[Any] = d_model
__lowerCAmelCase : Any = parent
__lowerCAmelCase : List[Any] = batch_size
__lowerCAmelCase : int = prediction_length
__lowerCAmelCase : str = context_length
__lowerCAmelCase : Any = cardinality
__lowerCAmelCase : Tuple = num_time_features
__lowerCAmelCase : List[str] = lags_sequence
__lowerCAmelCase : Any = embedding_dimension
__lowerCAmelCase : Dict = is_training
__lowerCAmelCase : Optional[int] = hidden_size
__lowerCAmelCase : List[str] = num_hidden_layers
__lowerCAmelCase : List[Any] = num_attention_heads
__lowerCAmelCase : Tuple = intermediate_size
__lowerCAmelCase : int = hidden_act
__lowerCAmelCase : Optional[int] = hidden_dropout_prob
__lowerCAmelCase : List[str] = attention_probs_dropout_prob
__lowerCAmelCase : Optional[Any] = context_length
__lowerCAmelCase : Any = prediction_length + label_length
__lowerCAmelCase : Union[str, Any] = label_length
__lowerCAmelCase : Any = moving_average
__lowerCAmelCase : Optional[int] = autocorrelation_factor
def UpperCamelCase__ ( self ) ->Dict:
'''simple docstring'''
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict
    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
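# Note (added for clarity, not part of the original test): Autoformer splits each series into a
# seasonal part and a trend part with a moving-average "decomposition layer"; the decoder is then
# seeded with the last `label_length` steps of both parts (padded with zeros / the context mean),
# which is exactly what the concatenations above reproduce by hand. A minimal sketch of such a
# decomposition, assuming a simple centered moving average (the real layer differs in details):
#
#   def naive_decompose(x, kernel_size=25):
#       # x: (batch, time, features); trend = moving average, seasonal = residual
#       pad = (kernel_size - 1) // 2
#       padded = torch.cat([x[:, :1].repeat(1, pad, 1), x, x[:, -1:].repeat(1, kernel_size - 1 - pad, 1)], dim=1)
#       trend = padded.unfold(1, kernel_size, 1).mean(dim=-1)
#       return x - trend, trend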
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])
    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
@unittest.skip(reason='''Model has no tokens embeddings''' )
    def test_resize_tokens_embeddings(self):
        pass
    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
'''past_values''',
'''past_time_features''',
'''past_observed_mask''',
'''static_categorical_features''',
'''static_real_features''',
'''future_values''',
'''future_time_features''',
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append('''future_observed_mask''' )
expected_arg_names.extend(
[
'''decoder_attention_mask''',
'''head_mask''',
'''decoder_head_mask''',
'''cross_attn_head_mask''',
'''encoder_outputs''',
'''past_key_values''',
'''output_hidden_states''',
'''output_attentions''',
'''use_cache''',
'''return_dict''',
] )
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1

            if "trend" in outputs:
                correct_outlen += 1

            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned

            if "loss" in outputs:
                correct_outlen += 1

            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

        # Check attention is always last and order is fine
        inputs_dict["output_attentions"] = True
        inputs_dict["output_hidden_states"] = True
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        self.assertEqual(out_len + 2, len(outputs))

        self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

        self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
        self.assertListEqual(
            list(self_attentions[0].shape[-3:]),
            [self.model_tester.num_attention_heads, encoder_seq_length, dim],
        )
@is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
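# Usage note (added; not in the original file): `prepare_batch` pulls a pre-serialized batch of the
# monthly-tourism dataset from the Hub so the integration tests below can run without a data
# pipeline. For example, `prepare_batch("val-batch.pt")` returns a dict with keys such as
# "past_values" and "past_time_features", already shaped for `AutoformerModel.forward`.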
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state

        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
| 583 | 1 |
import inspect
import unittest
class a__ ( unittest.TestCase ):
    def test_diffusers_import(self):
try:
import diffusers # noqa: F401
except ImportError:
assert False
    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)

        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f'''{backend} is not in the deps table!'''
| 559 |
import re
from filelock import FileLock
try:
import nltk
lowerCamelCase_ : int = True
except (ImportError, ModuleNotFoundError):
lowerCamelCase_ : Any = False
if NLTK_AVAILABLE:
with FileLock(""".lock""") as lock:
nltk.download("""punkt""", quiet=True)
def lowerCAmelCase( __lowerCamelCase ):
    __lowerCamelCase = re.sub('<n>', '', __lowerCamelCase)  # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(__lowerCamelCase ) )
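# Illustrative example (added; not in the original file):
# >>> lowerCAmelCase("Hello there. General Kenobi.")
# 'Hello there.\nGeneral Kenobi.'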
| 559 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCamelCase(metaclass=DummyObject):
    _backends = ["note_seq"]
def __init__( self , *lowercase__ , **lowercase__ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ['''note_seq'''] )
@classmethod
def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> List[str]:
"""simple docstring"""
requires_backends(cls , ['''note_seq'''] )
@classmethod
def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> List[str]:
"""simple docstring"""
requires_backends(cls , ['''note_seq'''] )
| 710 |
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
UpperCAmelCase : List[str] = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
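# Sanity check (added; not in the original script): for a uniform distribution over k outcomes
# the entropy is log(k), e.g.
# >>> float(entropy(torch.full((4,), 0.25)))   # ~= 1.3863 == log(4)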
def print_2d_tensor(tensor):
    """Print a 2D tensor, one logged row per layer."""
    logger.info("lv, h >\t" + "\t".join(f'''{x + 1}''' for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f'''layer {row + 1}:\t''' + "\t".join(f'''{x:.5f}''' for x in tensor[row].cpu().data))
        else:
            logger.info(f'''layer {row + 1}:\t''' + "\t".join(f'''{x:d}''' for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """This method shows how to compute:
    - head attention entropy
    - head importance scores according to http://arxiv.org/abs/1905.10650
    """
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)

    return attn_entropy, head_importance, total_loss
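# Note (added for clarity): the importance score per head is the accumulated |dL/d(mask)| over the
# evaluation set -- the first-order sensitivity of the LM loss to zeroing that head, following
# Michel et al., "Are Sixteen Heads Really Better than One?" (http://arxiv.org/abs/1905.10650).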
def mask_heads(args, model, eval_dataloader):
    """This method shows how to mask heads (set some heads to zero), to test the effect on the network,
    based on the head importance scores, as described in Michel et al. (http://arxiv.org/abs/1905.10650)
    """
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """This method shows how to prune heads (remove heads weights) based on
    the head importance scores as described in Michel et al. (http://arxiv.org/abs/1905.10650)
    """
    # Try pruning and test time speedup
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=None, actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)", original_num_params, pruned_num_params, pruned_num_params / original_num_params * 100, )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the .tsv files (or other data files) for the task.", )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.", )

    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name_or_path", )
    parser.add_argument(
        "--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name_or_path", )
    parser.add_argument(
        "--cache_dir", default=None, type=str, help="Where do you want to store the pre-trained models downloaded from s3", )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances.")
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory")
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers")
    parser.add_argument(
        "--dont_normalize_global_importance", action="store_true", help="Don't normalize all importance scores between 0 and 1", )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy.")
    parser.add_argument(
        "--masking_threshold", default=0.9, type=float, help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).", )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount of heads to mask at each masking step.")
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length", default=128, type=int, help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ), )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)
    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
| 47 | 0 |
def __UpperCAmelCase(grid) -> int:
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row, row_above) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
return current_row
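# Illustrative example (added; not in the original file): for the grid
#   [[1, 3, 1],
#    [1, 5, 1],
#    [4, 2, 1]]
# the cheapest top-left -> bottom-right path is 1 + 3 + 1 + 1 + 1 = 7:
# >>> __UpperCAmelCase([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
# 7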
if __name__ == "__main__":
import doctest
doctest.testmod()
| 475 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowercase__(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)
@torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
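# Minimal usage sketch (added; illustrative only -- the checkpoint name is an assumption based on
# publicly available score-SDE checkpoints, not something this file pins down):
#
#   pipe = lowercase__.from_pretrained("google/ncsnpp-celebahq-256")
#   image = pipe(num_inference_steps=2000).images[0]
#   image.save("sde_ve_sample.png")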
| 475 | 1 |
from __future__ import annotations
def UpperCamelCase__(n: int) -> list[int]:
    """Return the prime factors of n in non-decreasing order (trial division)."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
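# Illustrative example (added; not in the original file): 315 = 3 * 3 * 5 * 7
# >>> UpperCamelCase__(315)
# [3, 3, 5, 7]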
if __name__ == "__main__":
import doctest
doctest.testmod()
| 719 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
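# Note (added for clarity): this __init__ uses the transformers lazy-import pattern -- submodules
# are listed in `_import_structure` and only materialized on first attribute access via
# `_LazyModule`, so importing the package stays cheap even when optional torch/TF backends are
# installed.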
_import_structure = {
    'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
    'tokenization_deberta': ['DebertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_deberta_fast'] = ['DebertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_deberta'] = [
'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'DebertaForMaskedLM',
'DebertaForQuestionAnswering',
'DebertaForSequenceClassification',
'DebertaForTokenClassification',
'DebertaModel',
'DebertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_deberta'] = [
'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDebertaForMaskedLM',
'TFDebertaForQuestionAnswering',
'TFDebertaForSequenceClassification',
'TFDebertaForTokenClassification',
'TFDebertaModel',
'TFDebertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 230 | 0 |
'''simple docstring'''
import unittest
import numpy as np
def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray | None = None,
) -> np.ndarray:
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement."
            )

    return mat_c - mat_b.T @ a_inv @ mat_b
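# Note (added for clarity): for a block matrix M = [[A, B], [B^T, C]], the Schur complement of A
# is S = C - B^T A^{-1} B, and det(M) = det(A) * det(S) -- which is exactly what the first test
# below verifies numerically.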
class __SCREAMING_SNAKE_CASE(unittest.TestCase):
    def test_schur_complement(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)

    def test_improper_b_c_dimensions(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 92 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class lowercase(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = '<pad>'
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], '<s>')
        self.assertEqual(vocab_keys[1], '<pad>')
        self.assertEqual(vocab_keys[-1], '<mask>')
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1002)
    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )

        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-xlm-roberta', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained('xlm-roberta-base')
    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = 'I was born in 92000, and this is falsé.'

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = 'Hello World!'
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
        )
        original_tokenizer_encodings = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
179459,
124850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
10114,
711,
152,
20,
6,
5,
22376,
642,
1221,
15190,
34153,
450,
5608,
959,
1119,
57702,
136,
186,
47,
1098,
29367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
50901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenizer_integration(self):
UpperCAmelCase__ = {'input_ids': [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase__, model_name='xlm-roberta-base', revision='d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3', )
| 146 | 0 |
"""simple docstring"""
import functools
from typing import Any
def _lowerCamelCase(string: str, words: list) -> bool:
    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be not empty string")

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]

        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)

            if trie_node is None:
                return False

            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True

        return False

    return is_breakable(0)
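# Illustrative example (added; not in the original file):
# >>> _lowerCamelCase("applepenapple", ["apple", "pen"])
# True
# >>> _lowerCamelCase("catsandog", ["cats", "dog", "sand", "and", "cat"])
# False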
if __name__ == "__main__":
import doctest
doctest.testmod()
| 712 |
"""simple docstring"""
from math import pi
def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)
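# Worked example (added; not in the original file): a 90-degree arc of a circle with radius 10
# spans a quarter of the circumference, so arc_length(90, 10) == 2 * pi * 10 * 0.25 == 5 * pi,
# i.e. roughly 15.708 -- which is what the __main__ block below prints.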
if __name__ == "__main__":
print(arc_length(90, 10))
| 67 | 0 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece_bpe_char.model''')
@require_sentencepiece
@require_tokenizers
class A_(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}"""):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
# fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"])
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) ,[4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] ,)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
# fmt: on
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "<unk>", "."])
@slow
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = [
"Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
"general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
"Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
"models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
"BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
"conditioning on both left and right context in all layers.",
"The quick brown fox jumps over the lazy dog.",
]
# fmt: off
_lowerCamelCase : Tuple = {
"input_ids": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCAmelCase ,model_name="microsoft/speecht5_asr" ,revision="c5ef64c71905caeccde0e4462ef3f9077224c524" ,sequences=__lowerCAmelCase ,)
| 46 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
UpperCAmelCase__ : Optional[int] = False
@skip_mps
class UpperCamelCase_ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase_ = StableDiffusionAttendAndExcitePipeline
UpperCamelCase_ = False
UpperCamelCase_ = TEXT_TO_IMAGE_PARAMS
UpperCamelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS.union({"""token_indices"""} )
UpperCamelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCamelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def lowerCAmelCase__ ( cls) -> str:
super().setUpClass()
torch.use_deterministic_algorithms(UpperCamelCase)
@classmethod
def lowerCAmelCase__ ( cls) -> Tuple:
super().tearDownClass()
torch.use_deterministic_algorithms(UpperCamelCase)
def lowerCAmelCase__ ( self) -> Any:
torch.manual_seed(0)
UpperCamelCase__ : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCamelCase , )
UpperCamelCase__ : Any = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=UpperCamelCase , set_alpha_to_one=UpperCamelCase , )
torch.manual_seed(0)
UpperCamelCase__ : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0)
UpperCamelCase__ : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='gelu' , projection_dim=5_12 , )
UpperCamelCase__ : List[Any] = CLIPTextModel(UpperCamelCase)
UpperCamelCase__ : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
UpperCamelCase__ : Tuple = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def lowerCAmelCase__ ( self , UpperCamelCase , UpperCamelCase=0) -> str:
if str(UpperCamelCase).startswith('mps'):
UpperCamelCase__ : Tuple = torch.manual_seed(UpperCamelCase)
else:
UpperCamelCase__ : List[Any] = torch.Generator(device=UpperCamelCase).manual_seed(UpperCamelCase)
UpperCamelCase__ : List[Any] = {
'prompt': 'a cat and a frog',
'token_indices': [2, 5],
'generator': generator,
'num_inference_steps': 1,
'guidance_scale': 6.0,
'output_type': 'numpy',
'max_iter_to_alter': 2,
'thresholds': {0: 0.7},
}
return inputs
def lowerCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase__ : List[Any] = 'cpu'
UpperCamelCase__ : Union[str, Any] = self.get_dummy_components()
UpperCamelCase__ : Optional[Any] = self.pipeline_class(**UpperCamelCase)
pipe.to(UpperCamelCase)
pipe.set_progress_bar_config(disable=UpperCamelCase)
UpperCamelCase__ : Tuple = self.get_dummy_inputs(UpperCamelCase)
UpperCamelCase__ : Union[str, Any] = pipe(**UpperCamelCase).images
UpperCamelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3))
UpperCamelCase__ : str = np.array(
[0.6390_5364, 0.6289_7307, 0.4859_9017, 0.513_3624, 0.555_0048, 0.4576_9516, 0.5032_6973, 0.502_3139, 0.4538_4496])
UpperCamelCase__ : List[str] = np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(UpperCamelCase , 1E-3)
def lowerCAmelCase__ ( self) -> Dict:
super().test_cpu_offload_forward_pass(expected_max_diff=5E-4)
def lowerCAmelCase__ ( self) -> str:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2])
def lowerCAmelCase__ ( self) -> List[str]:
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4)
def lowerCAmelCase__ ( self) -> List[Any]:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3)
def lowerCAmelCase__ ( self) -> Optional[Any]:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4)
def lowerCAmelCase__ ( self) -> Dict:
super().test_save_load_local(expected_max_difference=5E-4)
def lowerCAmelCase__ ( self) -> Dict:
super().test_save_load_optional_components(expected_max_difference=4E-4)
@require_torch_gpu
@slow
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def lowerCAmelCase__ ( cls) -> Optional[int]:
super().setUpClass()
torch.use_deterministic_algorithms(UpperCamelCase)
@classmethod
def lowerCAmelCase__ ( cls) -> str:
super().tearDownClass()
torch.use_deterministic_algorithms(UpperCamelCase)
def lowerCAmelCase__ ( self) -> Tuple:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self) -> int:
UpperCamelCase__ : Dict = torch.manual_seed(51)
UpperCamelCase__ : Dict = StableDiffusionAttendAndExcitePipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , safety_checker=UpperCamelCase , torch_dtype=torch.floataa)
pipe.to('cuda')
UpperCamelCase__ : Optional[int] = 'a painting of an elephant with glasses'
UpperCamelCase__ : Dict = [5, 7]
UpperCamelCase__ : Optional[int] = pipe(
prompt=UpperCamelCase , token_indices=UpperCamelCase , guidance_scale=7.5 , generator=UpperCamelCase , num_inference_steps=5 , max_iter_to_alter=5 , output_type='numpy' , ).images[0]
UpperCamelCase__ : Optional[int] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy')
assert np.abs((expected_image - image).max()) < 5E-1
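# Hedged usage sketch (not part of the test file): the pipeline exercised above is
# used outside the test suite roughly like this; model id and arguments mirror the
# slow test and should be treated as illustrative.
#
# import torch
# from diffusers import StableDiffusionAttendAndExcitePipeline
#
# pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
#     "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
# ).to("cuda")
# image = pipe(
#     prompt="a painting of an elephant with glasses",
#     token_indices=[5, 7],  # which prompt tokens to "excite"
#     guidance_scale=7.5,
#     num_inference_steps=50,
# ).images[0]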
| 410 | 0 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _lowerCamelCase ( unittest.TestCase ):
def UpperCamelCase_ ( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def UpperCamelCase_ ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Tuple= FlaxControlNetModel.from_pretrained(
'''lllyasviel/sd-controlnet-canny''' , from_pt=lowerCAmelCase , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: List[Any]= FlaxStableDiffusionControlNetPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , controlnet=lowerCAmelCase , from_pt=lowerCAmelCase , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__: int= controlnet_params
SCREAMING_SNAKE_CASE__: Union[str, Any]= '''bird'''
SCREAMING_SNAKE_CASE__: str= jax.device_count()
SCREAMING_SNAKE_CASE__: int= pipe.prepare_text_inputs([prompts] * num_samples )
SCREAMING_SNAKE_CASE__: Any= load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' )
SCREAMING_SNAKE_CASE__: Optional[int]= pipe.prepare_image_inputs([canny_image] * num_samples )
SCREAMING_SNAKE_CASE__: int= jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE__: Dict= jax.random.split(lowerCAmelCase , jax.device_count() )
SCREAMING_SNAKE_CASE__: Tuple= replicate(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: int= shard(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: str= shard(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Dict= pipe(
prompt_ids=lowerCAmelCase , image=lowerCAmelCase , params=lowerCAmelCase , prng_seed=lowerCAmelCase , num_inference_steps=50 , jit=lowerCAmelCase , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
SCREAMING_SNAKE_CASE__: Optional[int]= images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE__: List[str]= images[0, 253:256, 253:256, -1]
SCREAMING_SNAKE_CASE__: List[Any]= jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE__: int= jnp.array(
[0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def UpperCamelCase_ ( self ) -> str:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Dict= FlaxControlNetModel.from_pretrained(
'''lllyasviel/sd-controlnet-openpose''' , from_pt=lowerCAmelCase , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Optional[Any]= FlaxStableDiffusionControlNetPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , controlnet=lowerCAmelCase , from_pt=lowerCAmelCase , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__: Any= controlnet_params
SCREAMING_SNAKE_CASE__: Union[str, Any]= '''Chef in the kitchen'''
SCREAMING_SNAKE_CASE__: Union[str, Any]= jax.device_count()
SCREAMING_SNAKE_CASE__: str= pipe.prepare_text_inputs([prompts] * num_samples )
SCREAMING_SNAKE_CASE__: Optional[Any]= load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png''' )
SCREAMING_SNAKE_CASE__: Tuple= pipe.prepare_image_inputs([pose_image] * num_samples )
SCREAMING_SNAKE_CASE__: List[Any]= jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE__: Any= jax.random.split(lowerCAmelCase , jax.device_count() )
SCREAMING_SNAKE_CASE__: List[Any]= replicate(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: str= shard(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Tuple= shard(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[Any]= pipe(
prompt_ids=lowerCAmelCase , image=lowerCAmelCase , params=lowerCAmelCase , prng_seed=lowerCAmelCase , num_inference_steps=50 , jit=lowerCAmelCase , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
SCREAMING_SNAKE_CASE__: str= images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE__: Optional[Any]= images[0, 253:256, 253:256, -1]
SCREAMING_SNAKE_CASE__: Tuple= jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE__: Optional[Any]= jnp.array(
[[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
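# Editorial note (hedged): the replicate/shard calls above are the standard Flax
# data-parallel recipe. Parameters are replicated across all local devices while
# prompt ids, images and RNG keys are sharded one slice per device, so the jitted
# pipeline call produces one image per device in a single pmap step.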
| 107 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 107 | 1 |
'''simple docstring'''
def greatest_common_divisor(x: int, y: int) -> int:
    """Euclid's algorithm for the greatest common divisor."""
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    """Lowest common multiple, computed via the GCD."""
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """Smallest positive number evenly divisible by all of 1..n (Project Euler 5)."""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g


if __name__ == "__main__":
    print(f"{solution() = }")
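# Hedged examples (computed by hand, not taken from the source):
# >>> solution(10)
# 2520
# >>> solution(20)
# 232792560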
| 44 |
from __future__ import annotations
class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list):
    """Create a linked list from the given sequence and return its head node."""
    if not elements_list:
        raise Exception("The Elements List is empty")

    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node):
    """Print every element of the linked list in reverse order, recursively."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
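# Expected output of main() for the list above (a sketch, assuming the repaired
# code in this row):
#   Linked List:
#   14->52->14->12->43
#   Elements in Reverse:
#   43
#   12
#   14
#   52
#   14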
| 655 | 0 |
import math
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
        if value == first_value_val:
            return next_prime(value + 1, **kwargs)
    return value
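# Hedged usage examples:
# >>> is_prime(29)
# True
# >>> next_prime(14)            # first prime reached counting up from 14
# 17
# >>> next_prime(7, factor=2)   # start the search at 2 * 7 = 14
# 17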
| 698 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__A : Union[str, Any] = logging.get_logger(__name__)
__A : Dict = {
'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json',
}
class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
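# Hedged usage sketch, assuming the classes above map to transformers'
# ResNetConfig / ResNetOnnxConfig:
#
# from transformers import ResNetConfig, ResNetModel
#
# config = ResNetConfig(depths=[3, 4, 6, 3], layer_type="bottleneck")
# model = ResNetModel(config)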
| 698 | 1 |
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
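# Hedged example: for force=0, area=4 m^2, distance=0.03 m the function solves
# F = (ħ c π² A) / (240 d⁴), which works out to roughly 6.4e-21 N.
# >>> casimir_force(force=0, area=4, distance=0.03)   # doctest: +SKIP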
| 17 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class a_ ( a_ ):
'''simple docstring'''
__a: jnp.ndarray
__a: jnp.ndarray
class a_ ( nn.Module ):
'''simple docstring'''
__a: int
__a: Tuple[int] = (1_6, 3_2, 9_6, 2_5_6)
__a: jnp.dtype = jnp.floataa
def _lowercase ( self ) -> str:
'''simple docstring'''
lowerCAmelCase_ = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowerCAmelCase_ = []
for i in range(len(self.block_out_channels ) - 1 ):
lowerCAmelCase_ = self.block_out_channels[i]
lowerCAmelCase_ = self.block_out_channels[i + 1]
lowerCAmelCase_ = nn.Conv(
lowercase_ , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(lowercase_ )
lowerCAmelCase_ = nn.Conv(
lowercase_ , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(lowercase_ )
lowerCAmelCase_ = blocks
lowerCAmelCase_ = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , lowercase_ ) -> int:
'''simple docstring'''
lowerCAmelCase_ = self.conv_in(lowercase_ )
lowerCAmelCase_ = nn.silu(lowercase_ )
for block in self.blocks:
lowerCAmelCase_ = block(lowercase_ )
lowerCAmelCase_ = nn.silu(lowercase_ )
lowerCAmelCase_ = self.conv_out(lowercase_ )
return embedding
@flax_register_to_config
class a_ ( nn.Module , a_ , a_ ):
'''simple docstring'''
__a: int = 3_2
__a: int = 4
__a: Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
__a: Union[bool, Tuple[bool]] = False
__a: Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
__a: int = 2
__a: Union[int, Tuple[int]] = 8
__a: Optional[Union[int, Tuple[int]]] = None
__a: int = 1_2_8_0
__a: float = 0.0
__a: bool = False
__a: jnp.dtype = jnp.floataa
__a: bool = True
__a: int = 0
__a: str = "rgb"
__a: Tuple[int] = (1_6, 3_2, 9_6, 2_5_6)
def _lowercase ( self , lowercase_ ) -> FrozenDict:
'''simple docstring'''
lowerCAmelCase_ = (1, self.in_channels, self.sample_size, self.sample_size)
lowerCAmelCase_ = jnp.zeros(lowercase_ , dtype=jnp.floataa )
lowerCAmelCase_ = jnp.ones((1,) , dtype=jnp.intaa )
lowerCAmelCase_ = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
lowerCAmelCase_ = (1, 3, self.sample_size * 8, self.sample_size * 8)
lowerCAmelCase_ = jnp.zeros(lowercase_ , dtype=jnp.floataa )
lowerCAmelCase_ , lowerCAmelCase_ = jax.random.split(lowercase_ )
lowerCAmelCase_ = {'params': params_rng, 'dropout': dropout_rng}
return self.init(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )["params"]
def _lowercase ( self ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ = self.block_out_channels
lowerCAmelCase_ = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
lowerCAmelCase_ = self.num_attention_heads or self.attention_head_dim
# input
lowerCAmelCase_ = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
lowerCAmelCase_ = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
lowerCAmelCase_ = FlaxTimestepEmbedding(lowercase_ , dtype=self.dtype )
lowerCAmelCase_ = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
lowerCAmelCase_ = self.only_cross_attention
if isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase_ = (only_cross_attention,) * len(self.down_block_types )
if isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase_ = (num_attention_heads,) * len(self.down_block_types )
# down
lowerCAmelCase_ = []
lowerCAmelCase_ = []
lowerCAmelCase_ = block_out_channels[0]
lowerCAmelCase_ = nn.Conv(
lowercase_ , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(lowercase_ )
for i, down_block_type in enumerate(self.down_block_types ):
lowerCAmelCase_ = output_channel
lowerCAmelCase_ = block_out_channels[i]
lowerCAmelCase_ = i == len(lowercase_ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
lowerCAmelCase_ = FlaxCrossAttnDownBlockaD(
in_channels=lowercase_ , out_channels=lowercase_ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
lowerCAmelCase_ = FlaxDownBlockaD(
in_channels=lowercase_ , out_channels=lowercase_ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(lowercase_ )
for _ in range(self.layers_per_block ):
lowerCAmelCase_ = nn.Conv(
lowercase_ , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(lowercase_ )
if not is_final_block:
lowerCAmelCase_ = nn.Conv(
lowercase_ , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(lowercase_ )
lowerCAmelCase_ = down_blocks
lowerCAmelCase_ = controlnet_down_blocks
# mid
lowerCAmelCase_ = block_out_channels[-1]
lowerCAmelCase_ = FlaxUNetMidBlockaDCrossAttn(
in_channels=lowercase_ , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
lowerCAmelCase_ = nn.Conv(
lowercase_ , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = 1.0 , lowercase_ = True , lowercase_ = False , ) -> Union[FlaxControlNetOutput, Tuple]:
'''simple docstring'''
lowerCAmelCase_ = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
lowerCAmelCase_ = jnp.flip(lowercase_ , axis=1 )
# 1. time
if not isinstance(lowercase_ , jnp.ndarray ):
lowerCAmelCase_ = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(lowercase_ , jnp.ndarray ) and len(timesteps.shape ) == 0:
lowerCAmelCase_ = timesteps.astype(dtype=jnp.floataa )
lowerCAmelCase_ = jnp.expand_dims(lowercase_ , 0 )
lowerCAmelCase_ = self.time_proj(lowercase_ )
lowerCAmelCase_ = self.time_embedding(lowercase_ )
# 2. pre-process
lowerCAmelCase_ = jnp.transpose(lowercase_ , (0, 2, 3, 1) )
lowerCAmelCase_ = self.conv_in(lowercase_ )
lowerCAmelCase_ = jnp.transpose(lowercase_ , (0, 2, 3, 1) )
lowerCAmelCase_ = self.controlnet_cond_embedding(lowercase_ )
sample += controlnet_cond
# 3. down
lowerCAmelCase_ = (sample,)
for down_block in self.down_blocks:
if isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase_ , lowerCAmelCase_ = down_block(lowercase_ , lowercase_ , lowercase_ , deterministic=not train )
else:
lowerCAmelCase_ , lowerCAmelCase_ = down_block(lowercase_ , lowercase_ , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
lowerCAmelCase_ = self.mid_block(lowercase_ , lowercase_ , lowercase_ , deterministic=not train )
# 5. contronet blocks
lowerCAmelCase_ = ()
for down_block_res_sample, controlnet_block in zip(lowercase_ , self.controlnet_down_blocks ):
lowerCAmelCase_ = controlnet_block(lowercase_ )
controlnet_down_block_res_samples += (down_block_res_sample,)
lowerCAmelCase_ = controlnet_down_block_res_samples
lowerCAmelCase_ = self.controlnet_mid_block(lowercase_ )
# 6. scaling
lowerCAmelCase_ = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=lowercase_ , mid_block_res_sample=lowercase_ )
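# Hedged usage sketch, assuming the module above corresponds to diffusers'
# FlaxControlNetModel:
#
# import jax.numpy as jnp
# from diffusers import FlaxControlNetModel
#
# controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
#     "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
# )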
| 318 | 0 |
"""simple docstring"""
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Return the first non-negative int found under any of ``env_keys``, else ``default``."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
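# Hedged usage sketch:
# >>> os.environ["MY_FLAG"] = "1"
# >>> parse_flag_from_env("MY_FLAG", default=False)
# True
# >>> get_int_from_env(["WORLD_SIZE", "SLURM_NTASKS"], default=1)   # neither set
# 1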
| 702 |
"""simple docstring"""
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir,
            keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        self.path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(cache_dir=cache_dir, data_files=self.path_or_paths, features=features, **kwargs)

    def read(self):
        # Build an iterable dataset when streaming
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build a regular (map-style) dataset otherwise
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode,
                verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
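# Hedged usage sketch (this reader backs the public `datasets` text loader):
#
# from datasets import load_dataset
# ds = load_dataset("text", data_files="my_corpus.txt", split="train")
# # each line of my_corpus.txt becomes one example with a single "text" column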
| 668 | 0 |
def cramers_rule_2x2(equation1, equation2):
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients: a1*x + b1*y = c1 and a2*x + b2*y = c2
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (both numerator determinants are zero)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
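# Hedged example: solve x + 2y = 3 and 2x + 5y = 7.
# >>> cramers_rule_2x2([1, 2, 3], [2, 5, 7])
# (1.0, 1.0)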
| 23 |
"""simple docstring"""
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Scrape the headline world COVID-19 counters from worldometers.info."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(F"{key}\n{value}\n")
| 200 | 0 |
'''simple docstring'''
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
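# Hedged usage sketch: these backends are normally reached through
# `Trainer.hyperparameter_search`, e.g.
#
# best_run = trainer.hyperparameter_search(
#     direction="minimize", backend="optuna", n_trials=10
# )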
| 713 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = '▁'
__SCREAMING_SNAKE_CASE = {'vocab_file': 'sentencepiece.bpe.model'}
__SCREAMING_SNAKE_CASE = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'
),
}
}
__SCREAMING_SNAKE_CASE = {
'facebook/nllb-200-distilled-600M': 1_0_2_4,
}
# fmt: off
__SCREAMING_SNAKE_CASE = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = ["input_ids", "attention_mask"]
__UpperCamelCase = []
__UpperCamelCase = []
def __init__( self : Union[str, Any] , A__ : List[str] , A__ : Union[str, Any]="<s>" , A__ : str="</s>" , A__ : Dict="</s>" , A__ : Union[str, Any]="<s>" , A__ : int="<unk>" , A__ : int="<pad>" , A__ : Any="<mask>" , A__ : List[str]=None , A__ : Union[str, Any]=None , A__ : Optional[int]=None , A__ : Optional[Dict[str, Any]] = None , A__ : int=None , A__ : Tuple=False , **A__ : str , ) -> List[Any]:
'''simple docstring'''
a__ : str = AddedToken(A__ , lstrip=A__ , rstrip=A__ ) if isinstance(A__ , A__ ) else mask_token
a__ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
a__ : Optional[Any] = legacy_behaviour
super().__init__(
bos_token=A__ , eos_token=A__ , unk_token=A__ , sep_token=A__ , cls_token=A__ , pad_token=A__ , mask_token=A__ , tokenizer_file=A__ , src_lang=A__ , tgt_lang=A__ , additional_special_tokens=A__ , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=A__ , **A__ , )
a__ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A__ ) )
a__ : List[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
a__ : Dict = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
a__ : List[str] = 1
a__ : List[Any] = len(self.sp_model )
a__ : Tuple = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(A__ )
}
a__ : List[Any] = {v: k for k, v in self.lang_code_to_id.items()}
a__ : Any = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
a__ : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
a__ : str = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
a__ : Tuple = src_lang if src_lang is not None else '''eng_Latn'''
a__ : Dict = self.lang_code_to_id[self._src_lang]
a__ : int = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
a__ : List[Any] = self.__dict__.copy()
a__ : Optional[Any] = None
a__ : List[str] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : int , A__ : int ) -> List[Any]:
'''simple docstring'''
a__ : Optional[int] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
a__ : str = {}
a__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def __lowerCAmelCase ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def __lowerCAmelCase ( self : str ) -> str:
'''simple docstring'''
return self._src_lang
@src_lang.setter
def __lowerCAmelCase ( self : List[str] , A__ : str ) -> None:
'''simple docstring'''
a__ : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __lowerCAmelCase ( self : Dict , A__ : List[int] , A__ : Optional[List[int]] = None , A__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A__ , token_ids_a=A__ , already_has_special_tokens=A__ )
a__ : List[str] = [1] * len(self.prefix_tokens )
a__ : str = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(A__ )) + suffix_ones
return prefix_ones + ([0] * len(A__ )) + ([0] * len(A__ )) + suffix_ones
def __lowerCAmelCase ( self : Any , A__ : List[int] , A__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __lowerCAmelCase ( self : int , A__ : List[int] , A__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
a__ : Optional[int] = [self.sep_token_id]
a__ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowerCAmelCase ( self : Dict , A__ : Optional[int] , A__ : str , A__ : Optional[str] , A__ : Optional[str] , **A__ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
a__ : List[Any] = src_lang
a__ : List[Any] = self(A__ , add_special_tokens=A__ , return_tensors=A__ , **A__ )
a__ : Union[str, Any] = self.convert_tokens_to_ids(A__ )
a__ : Union[str, Any] = tgt_lang_id
return inputs
def __lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
a__ : List[str] = {self.convert_ids_to_tokens(A__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowerCAmelCase ( self : Union[str, Any] , A__ : str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(A__ , out_type=A__ )
def __lowerCAmelCase ( self : Any , A__ : List[str] ) -> Optional[Any]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
a__ : Tuple = self.sp_model.PieceToId(A__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __lowerCAmelCase ( self : List[Any] , A__ : int ) -> Union[str, Any]:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __lowerCAmelCase ( self : Union[str, Any] , A__ : Any ) -> Tuple:
'''simple docstring'''
a__ : Optional[Any] = ''''''.join(A__ ).replace(A__ , ''' ''' ).strip()
return out_string
def __lowerCAmelCase ( self : Optional[Any] , A__ : str , A__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(A__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
a__ : Tuple = os.path.join(
A__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A__ )
elif not os.path.isfile(self.vocab_file ):
with open(A__ , '''wb''' ) as fi:
a__ : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(A__ )
return (out_vocab_file,)
def __lowerCAmelCase ( self : Optional[Any] , A__ : List[str] , A__ : str = "eng_Latn" , A__ : Optional[List[str]] = None , A__ : str = "fra_Latn" , **A__ : Any , ) -> BatchEncoding:
'''simple docstring'''
a__ : Optional[Any] = src_lang
a__ : Optional[Any] = tgt_lang
return super().prepare_seqaseq_batch(A__ , A__ , **A__ )
def __lowerCAmelCase ( self : Tuple ) -> str:
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def __lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __lowerCAmelCase ( self : int , A__ : Union[str, Any] ) -> None:
'''simple docstring'''
a__ : List[str] = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
a__ : Union[str, Any] = []
a__ : Dict = [self.eos_token_id, self.cur_lang_code]
else:
a__ : Tuple = [self.cur_lang_code]
a__ : List[Any] = [self.eos_token_id]
def __lowerCAmelCase ( self : Dict , A__ : str ) -> None:
'''simple docstring'''
a__ : List[str] = self.lang_code_to_id[lang]
if self.legacy_behaviour:
a__ : Optional[Any] = []
a__ : Tuple = [self.eos_token_id, self.cur_lang_code]
else:
a__ : List[str] = [self.cur_lang_code]
a__ : str = [self.eos_token_id]
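# Hedged usage sketch, assuming the class above corresponds to transformers'
# NllbTokenizer:
#
# from transformers import NllbTokenizer
#
# tokenizer = NllbTokenizer.from_pretrained(
#     "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
# )
# inputs = tokenizer("Hello world", return_tensors="pt")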
| 340 | 0 |
'''simple docstring'''
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
    """Map an activation-function name to the corresponding ``nn.Module``."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
| 75 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
__snake_case = {
"""vocab_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""allenai/longformer-base-4096""": 4096,
"""allenai/longformer-large-4096""": 4096,
"""allenai/longformer-large-4096-finetuned-triviaqa""": 4096,
"""allenai/longformer-base-4096-extra.pos.embd.only""": 4096,
"""allenai/longformer-large-4096-extra.pos.embd.only""": 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode( ) -> dict:
    """simple docstring"""
    bs = (
        list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def get_pairs( word : tuple ) -> set:
    """simple docstring"""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
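# Worked example for the two helpers above:
# bytes_to_unicode() maps every byte 0-255 to a printable unicode character, so raw
# bytes (including spaces and control bytes) survive BPE as ordinary symbols.
# get_pairs(("h", "e", "l", "l", "o")) -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}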
class _lowerCAmelCase ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__( self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ):
        '''simple docstring'''
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding="utf-8" ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding="utf-8" ) as merges_handle:
            bpe_merges = merges_handle.read().split("\n" )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
    @property
    def vocab_size( self ) -> int:
        '''simple docstring'''
        return len(self.encoder )
    def get_vocab( self ) -> dict:
        '''simple docstring'''
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ) -> str:
        '''simple docstring'''
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = " ".join(word )
        self.cache[token] = word
        return word
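    # Walk-through of the merge loop above on a toy rank table (hypothetical ranks):
    # with self.bpe_ranks = {("l", "o"): 0, ("lo", "w"): 1}, bpe("low") fuses the
    # lowest-ranked adjacent pair each round: ("l","o","w") -> ("lo","w") -> ("low",).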
    def _tokenize( self , text ) -> List[Any]:
        '''simple docstring'''
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(" " ) )
        return bpe_tokens
    def _convert_token_to_id( self , token ) -> int:
        '''simple docstring'''
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ) -> str:
        '''simple docstring'''
        return self.decoder.get(index )
    def convert_tokens_to_string( self , tokens ) -> str:
        '''simple docstring'''
        text = "".join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
        return text
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(vocab_file , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + "\n" )
        index = 0
        with open(merge_file , "w" , encoding="utf-8" ) as writer:
            writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        " Please check that the tokenizer is not corrupted!" )
                    index = token_index
                writer.write(" ".join(bpe_tokens ) + "\n" )
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b = None ) -> List[int]:
        '''simple docstring'''
        if token_ids_b is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_b + sep
    def get_special_tokens_mask( self , token_ids_a , token_ids_b = None , already_has_special_tokens = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True )
        if token_ids_b is None:
            return [1] + ([0] * len(token_ids_a )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1, 1] + ([0] * len(token_ids_b )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
    def prepare_for_tokenization( self , text , is_split_into_words=False , **kwargs ) -> Any:
        '''simple docstring'''
        add_prefix_space = kwargs.pop("add_prefix_space" , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
| 178 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImgaImgPipelineFastTests ( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    pipeline_class = StableDiffusionXLImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self : str ):
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , attention_head_dim=(2, 4) , use_linear_projection=True , addition_embed_type="text_time" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=8_0 , cross_attention_dim=6_4 , )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , steps_offset=1 , beta_schedule="scaled_linear" , timestep_spacing="leading" , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_2_8 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="gelu" , projection_dim=3_2 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" , local_files_only=False )
        text_encoder_a = CLIPTextModelWithProjection(text_encoder_config )
        tokenizer_a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" , local_files_only=False )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_a,
            "tokenizer_2": tokenizer_a,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs( self : Tuple , device : str , seed : Optional[int]=0 ):
        image = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(seed ) ).to(device )
        image = image / 2 + 0.5
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.7_5,
        }
        return inputs
    def test_stable_diffusion_xl_img2img_euler( self : Tuple ):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 3_2, 3_2, 3)
        expected_slice = np.array([0.4_6_5_6, 0.4_8_4_0, 0.4_4_3_9, 0.6_6_9_8, 0.5_5_7_4, 0.4_5_2_4, 0.5_7_9_9, 0.5_9_4_3, 0.5_1_6_5] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_attention_slicing_forward_pass( self : Tuple ):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
    def test_inference_batch_single_identical( self : Tuple ):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
    def test_save_load_optional_components( self : Optional[int] ):
        pass
    def test_stable_diffusion_xl_img2img_negative_prompt_embeds( self : int ):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device )
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]
        output = sd_pipe(**inputs )
        image_slice_a = output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device )
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt" )]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt , negative_prompt=negative_prompt )
        output = sd_pipe(
            **inputs , prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , pooled_prompt_embeds=pooled_prompt_embeds , negative_pooled_prompt_embeds=negative_pooled_prompt_embeds , )
        image_slice_b = output.images[0, -3:, -3:, -1]
        # make sure that it's equal
        assert np.abs(image_slice_a.flatten() - image_slice_b.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionXLImgaImgPipelineSlowTests ( unittest.TestCase ):
    """simple docstring"""
    def tearDown( self : Optional[Any] ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs( self : Union[str, Any] , device , generator_device="cpu" , dtype=torch.float32 , seed=0 ):
        generator = torch.Generator(device=generator_device ).manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 4, 6_4, 6_4) )
        latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_default_ddim( self : Optional[int] ):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base" )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.4_9_4_9_3, 0.4_7_8_9_6, 0.4_0_7_9_8, 0.5_4_2_1_4, 0.5_3_2_1_2, 0.4_8_2_0_2, 0.4_7_6_5_6, 0.4_6_3_2_9, 0.4_8_5_0_6] )
        assert np.abs(image_slice - expected_slice ).max() < 7e-3
| 716 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""",
}
class InstructBlipVisionConfig ( PretrainedConfig ):
    """simple docstring"""
    model_type = "instructblip_vision_model"
    def __init__( self : List[Any] , hidden_size : Dict=1_4_0_8 , intermediate_size : Tuple=6_1_4_4 , num_hidden_layers : str=3_9 , num_attention_heads : int=1_6 , image_size : str=2_2_4 , patch_size : Any=1_4 , hidden_act : Dict="gelu" , layer_norm_eps : List[Any]=1e-6 , attention_dropout : Any=0.0 , initializer_range : List[Any]=1e-1_0 , qkv_bias : Union[str, Any]=True , **kwargs : Tuple , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
    @classmethod
    def from_pretrained( cls : List[str] , pretrained_model_name_or_path : Union[str, os.PathLike] , **kwargs : Optional[Any] ):
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type" ) == "instructblip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class InstructBlipQFormerConfig ( PretrainedConfig ):
    """simple docstring"""
    model_type = "instructblip_qformer"
    def __init__( self : Any , vocab_size : Union[str, Any]=3_0_5_2_2 , hidden_size : Union[str, Any]=7_6_8 , num_hidden_layers : Optional[int]=1_2 , num_attention_heads : Dict=1_2 , intermediate_size : Dict=3_0_7_2 , hidden_act : List[str]="gelu" , hidden_dropout_prob : Union[str, Any]=0.1 , attention_probs_dropout_prob : Tuple=0.1 , max_position_embeddings : Any=5_1_2 , initializer_range : Optional[int]=0.0_2 , layer_norm_eps : List[str]=1e-1_2 , pad_token_id : Any=0 , position_embedding_type : Optional[Any]="absolute" , cross_attention_frequency : str=2 , encoder_hidden_size : Any=1_4_0_8 , **kwargs : List[str] , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
    @classmethod
    def from_pretrained( cls : List[Any] , pretrained_model_name_or_path : Union[str, os.PathLike] , **kwargs : Optional[int] ):
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type" ) == "instructblip":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class InstructBlipConfig ( PretrainedConfig ):
    """simple docstring"""
    model_type = "instructblip"
    is_composition = True
    def __init__( self : List[str] , vision_config : Optional[Any]=None , qformer_config : Tuple=None , text_config : Optional[int]=None , num_query_tokens : Optional[Any]=3_2 , **kwargs : Optional[int] ):
        super().__init__(**kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values." )
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values." )
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
        self.vision_config = InstructBlipVisionConfig(**vision_config )
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config )
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config )
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.0_2
    @classmethod
    def from_vision_qformer_text_configs( cls : List[str] , vision_config : InstructBlipVisionConfig , qformer_config : InstructBlipQFormerConfig , text_config : PretrainedConfig , **kwargs : int , ):
        return cls(
            vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **kwargs , )
    def to_dict( self : Optional[int] ):
        output = copy.deepcopy(self.__dict__ )
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
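# Sketch of composing the three configs above (default values; the text model falls
# back to OPT when no `model_type` is given):
# config = InstructBlipConfig(text_config={"model_type": "opt"})
# After construction, __init__ has wired the towers together, so:
# config.qformer_config.encoder_hidden_size == config.vision_config.hidden_size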
| 25 | 0 |
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class snake_case (ProcessorMixin ):
    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"
    def __init__( self , feature_extractor , tokenizer ) -> None:
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__( self , *args , **kwargs ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
            audio = kwargs.pop("raw_speech" )
        else:
            audio = kwargs.pop("audio" , None )
        sampling_rate = kwargs.pop("sampling_rate" , None )
        text = kwargs.pop("text" , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process." )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @contextmanager
    def as_target_processor( self ):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call." )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
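# Minimal usage sketch for the processor above, which stands in for
# Speech2TextProcessor (hypothetical checkpoint id; `waveform` is a 1-D float array):
# processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
# batch = processor(audio=waveform, sampling_rate=16_000, text="a transcript")
# batch["input_features"] feeds the encoder; batch["labels"] holds the tokenized targets.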
| 267 |
def count_divisors( n ):
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors
def solution( ):
    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num ) > 500:
            break
    return t_num
if __name__ == "__main__":
print(solution())
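# Worked example of the divisor count driving the loop above: 28 = 2^2 * 7, so it has
# (2 + 1) * (1 + 1) = 6 divisors (1, 2, 4, 7, 14, 28); `solution` keeps growing the
# triangular number t_num until count_divisors(t_num) exceeds 500.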
| 669 | 0 |
def hexagonal_numbers( length : int ) -> list[int]:
    """simple docstring"""
    if length <= 0 or not isinstance(length , int ):
        raise ValueError("Length must be a positive integer." )
    return [n * (2 * n - 1) for n in range(length )]
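# The closed form above is h_n = n * (2n - 1), starting at n = 0:
# hexagonal_numbers(5) -> [0, 1, 6, 15, 28]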
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 708 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        return LlamaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = LlamaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        config.add_cross_attention = True
        model = LlamaModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_causal_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        model = LlamaForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_decoder_model_past_large_inputs( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        # first forward pass
        outputs = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , use_cache=True , )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1 )
        output_from_no_past = model(
            next_input_ids , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_hidden_states=True , )["hidden_states"][0]
        output_from_past = model(
            next_tokens , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1e-3 ) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'feature-extraction': LlamaModel,
            'text-classification': LlamaForSequenceClassification,
            'text-generation': LlamaForCausalLM,
            'zero-shot': LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp( self ):
        self.model_tester = LlamaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LlamaConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_llama_sequence_classification_model( self ):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = LlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_llama_sequence_classification_model_for_single_label( self ):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = LlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_llama_sequence_classification_model_for_multi_label( self ):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        model = LlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test" )
    def test_save_load_fast_init_from_base( self ):
        pass
    @parameterized.expand([("linear",), ("dynamic",)] )
    def test_model_rope_scaling( self , scaling_type ):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10] , config.vocab_size )
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config )
        original_model.to(torch_device )
        original_model.eval()
        original_short_output = original_model(short_input ).last_hidden_state
        original_long_output = original_model(long_input ).last_hidden_state
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config )
        scaled_model.to(torch_device )
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input ).last_hidden_state
        scaled_long_output = scaled_model(long_input ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output , scaled_short_output , atol=1e-5 ) )
        else:
            self.assertFalse(torch.allclose(original_short_output , scaled_short_output , atol=1e-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output , scaled_long_output , atol=1e-5 ) )
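    # Shape of the two scaling modes exercised above (formulas, not library calls):
    # "linear"  feeds RoPE position p as p / factor for every input length, so even
    #           short inputs differ from the unscaled model;
    # "dynamic" only stretches the rotary base once seq_len exceeds the trained
    #           max_position_embeddings, so short inputs match the baseline exactly.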
@require_torch
class LlamaIntegrationTest ( unittest.TestCase ):
    @unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
    @slow
    def test_model_7b_logits( self ):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf" , device_map="auto" )
        out = model(torch.tensor([input_ids] ) )
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.65_50, -4.12_27, -4.98_59, -3.24_06, 0.82_62, -3.00_33, 1.29_64, -3.36_99]] )
        torch.testing.assert_close(out.mean(-1 ) , EXPECTED_MEAN , atol=1e-2 , rtol=1e-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.82_81, -7.44_53, -0.46_39, -8.06_25, -7.25_00, -8.00_00, -6.48_83, -7.76_95, -7.84_38, -7.03_12, -6.21_88, -7.13_28, -1.84_96, 1.99_61, -8.62_50, -6.72_27, -12.82_81, -6.94_92, -7.07_42, -7.78_52, -7.58_20, -7.90_62, -6.93_75, -7.98_05, -8.34_38, -8.15_62, -8.04_69, -7.62_50, -7.74_22, -7.33_98,] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] , EXPECTED_SLICE , atol=1e-5 , rtol=1e-5 )
    @unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
    @slow
    def test_model_13b_logits( self ):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf" , device_map="auto" )
        out = model(torch.tensor(input_ids ) )
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.06_22, -1.27_94, -1.16_38, -0.97_88, -1.46_03, -1.02_38, -1.78_93, -1.44_11]] )
        torch.testing.assert_close(out.mean(-1 ) , EXPECTED_MEAN , atol=1e-2 , rtol=1e-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.14_06, -8.05_47, 2.74_61, -1.23_44, -0.14_48, -1.82_62, -1.00_20, -1.81_54, -1.68_95, -1.85_16, -2.35_74, -0.92_77, 3.75_98, 6.57_42, -1.29_98, -0.11_77, -8.14_06, -2.96_88, -2.91_99, -3.16_99, -3.52_54, -2.35_55, -2.79_88, -3.41_41, -2.82_62, -4.51_95, -3.33_79, -3.31_64, -2.78_32, -3.02_73] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] , EXPECTED_SLICE , atol=1e-5 , rtol=1e-5 )
    @unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
    @slow
    def test_model_13bf_logits( self ):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf" , device_map="auto" )
        out = model(torch.tensor(input_ids ) )
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.85_62, -1.85_20, -0.75_51, -0.41_62, -1.51_61, -1.20_38, -2.48_23, -2.32_54]] )
        torch.testing.assert_close(out.mean(-1 ) , EXPECTED_MEAN , atol=1e-2 , rtol=1e-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.22_27, 4.88_28, 0.90_23, -0.45_78, -0.78_71, -0.10_33, -0.62_21, -0.57_86, -0.78_03, -1.06_74, -1.29_20, -0.15_70, 0.80_08, 2.07_23, -0.94_97, 0.27_71, -2.22_27, -0.76_12, -1.43_46, -1.20_61, -1.64_26, -0.30_00, -0.71_39, -1.19_34, -1.86_91, -1.69_73, -1.59_47, -1.27_05, -0.35_23, -0.55_13] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] , EXPECTED_SLICE , atol=1e-2 , rtol=1e-2 )
    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test" )
    @slow
    def test_model_70b_logits( self ):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf" , device_map="auto" )
        out = model(torch.tensor(input_ids ) )
        EXPECTED_MEAN = torch.tensor(
            [[-4.23_27, -3.33_60, -4.66_65, -4.76_31, -1.81_80, -3.41_70, -1.42_11, -3.18_10]] , dtype=torch.float32 )
        torch.testing.assert_close(out.mean(-1 ) , EXPECTED_MEAN , atol=1e-2 , rtol=1e-2 )
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.49_22, -3.95_51, 1.79_98, -5.67_58, -5.10_55, -5.89_84, -4.83_20, -6.80_86, -6.53_91, -5.61_72, -5.58_20, -5.53_52, 1.78_81, 3.62_89, -6.51_17, -3.47_85, -9.50_00, -6.03_52, -6.81_25, -6.01_95, -6.68_36, -5.47_27, -6.28_12, -6.03_91, -7.33_98, -7.42_97, -7.48_44, -6.58_20, -5.87_89, -5.53_12] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] , EXPECTED_SLICE , atol=1e-5 , rtol=1e-5 )
    @unittest.skip("Model is curently gated" )
    @slow
    def test_model_13b_greedy_generation( self ):
        EXPECTED_TEXT_COMPLETION = "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf" )
        input_ids = tokenizer.encode(prompt , return_tensors="pt" )
        model = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf" , device_map="sequential" , use_safetensors=False )
        # greedy generation outputs
        generated_ids = model.generate(input_ids , max_new_tokens=64 , top_p=None , temperature=1 , do_sample=False )
        text = tokenizer.decode(generated_ids[0] , skip_special_tokens=True )
        self.assertEqual(EXPECTED_TEXT_COMPLETION , text )
| 274 | 0 |
from __future__ import annotations
def generate_sum_of_subsets_soln( nums : list[int] , max_sum : int ) -> list[list[int]]:
    """simple docstring"""
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums )
    create_state_space_tree(nums , max_sum , num_index , path , result , remaining_nums_sum )
    return result
def create_state_space_tree( nums : list[int] , max_sum : int , num_index : int , path : list[int] , result : list[list[int]] , remaining_nums_sum : int , ) -> None:
    """simple docstring"""
    if sum(path ) > max_sum or (remaining_nums_sum + sum(path )) < max_sum:
        return
    if sum(path ) == max_sum:
        result.append(path )
        return
    for index in range(num_index , len(nums ) ):
        create_state_space_tree(
            nums , max_sum , index + 1 , [*path, nums[index]] , result , remaining_nums_sum - nums[index] , )
nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
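# Pruning used by create_state_space_tree above: a branch dies when the partial sum
# exceeds max_sum, or when adding every remaining number still cannot reach it.
# For nums = [3, 34, 4, 12, 5, 2] and max_sum = 9 this prints: [3, 4, 2] [4, 5]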
| 419 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)
class snake_case__ ( CLIPImageProcessor ):
    def __init__( self , *args , **kwargs ) -> None:
        """simple docstring"""
        warnings.warn(
            """The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use CLIPImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 419 | 1 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt( args ):
    parameter_file = os.path.join(args.tf_model_dir , "parameters.json" )
    params = json.loads(open(parameter_file ).read() )
    if not params:
        raise ValueError(
            F"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" )
    if not args.output.endswith(".pt" ):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0" ):
        reader = tf.train.load_checkpoint(args.tf_model_dir )
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name ).astype(np.float16 )
            if key_name.endswith("/adam_m" ) or key_name.endswith("/adam_v" ):
                continue
            if key_name.startswith("pasts/" ):
                if key_name.startswith("pasts/mlp" ):
                    player = int(key_name[9] )
                elif key_name.startswith("pasts/out" ):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequencial with Tanh, so 2 at a time
                state = vnp.transpose([1, 0] ).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state )
            elif key_name.startswith("model/moe" ):
                player = int(key_name[9:].split("/" )[0] )
                if key_name.endswith("/switch_gating/kernel" ):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0] ).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state )
                elif key_name.endswith("/softmlp/kernel" ):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0] ).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state )
                elif key_name.endswith("/wo/kernel" ) or key_name.endswith("/wi/kernel" ):
                    nlayer = key_name[-9:-7]
                    for i in range(16 ):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0] ).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state )
            elif key_name.startswith("model/mlp" ):
                player = int(key_name[9:].split("/" )[0] )
                if key_name.endswith("/p1/kernel" ):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0] ).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state )
                elif key_name.endswith("/p1/bias" ):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state )
                elif key_name.endswith("/p2/kernel" ):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0] ).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state )
                elif key_name.endswith("/p2/bias" ):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state )
            elif key_name.startswith("model/ln" ):
                player = int(key_name[8:].split("/" )[0] )
                if key_name.endswith("/b" ):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state )
                elif key_name.endswith("/g" ):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state )
            elif key_name.startswith("model/att" ):
                player = int(key_name[9:].split("/" )[0] )
                if key_name.endswith("/qkv/kernel" ):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
                        .transpose([1, 0] )
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
                        .transpose([1, 0] )
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
                        .transpose([1, 0] )
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q )
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k )
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v )
                elif key_name.endswith("/o/kernel" ):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state )
            elif key_name.startswith("model/an" ):
                player = int(key_name[8:].split("/" )[0] )
                if key_name.endswith("/b" ):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state )
                elif key_name.endswith("/g" ):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state )
            elif (
                key_name.startswith("model/wte" )
                or key_name.startswith("model/wpe" )
                or key_name.startswith("model/ete" )
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state )
                if key_name.startswith("model/wte" ):
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state )
            elif key_name.startswith("model/wob" ):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1) )
                new_state[name] = torch.tensor(state )
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0] ).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state )
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state )
    torch.save(new_state , args.output )
if __name__ == "__main__":
_A = argparse.ArgumentParser(
description='model converter.', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--tf_model_dir', metavar='PATH', type=str, required=True, help='import model')
parser.add_argument('--output', metavar='PATH', type=str, required=True, help='output model')
_A = parser.parse_args()
convert_tf_gptsan_to_pt(args)
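# Example invocation of the converter above (script name and paths are placeholders):
#   python convert_gptsan_tf_checkpoint_to_pytorch.py \
#       --tf_model_dir ./gptsan_tf_checkpoint --output ./gptsan_pytorch_model.pt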
| 538 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'processing_wav2vec2_with_lm': ['Wav2Vec2ProcessorWithLM']}
if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 538 | 1 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"""The `inpainting.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionInpaintPipeline` instead."""
)
| 71 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList :
    def __init__( self , initial_capacity : int = 6 ) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity )
    def create_linked_list( self , initial_capacity : int ) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1 , initial_capacity ):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        previous_node.next = self.front
        self.front.prev = previous_node
    def is_empty( self ) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )
    def first( self ) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None
    def enqueue( self , data : Any ) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data
    def dequeue( self ) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data
    def check_can_perform_operation( self ) -> None:
        if self.is_empty():
            raise Exception("""Empty Queue""" )
    def check_is_full( self ) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("""Full Queue""" )
class Node :
    def __init__( self ) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
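
    # Minimal usage sketch (FIFO behaviour over the fixed-capacity ring):
    queue = CircularQueueLinkedList(initial_capacity=3)
    queue.enqueue("a")
    queue.enqueue("b")
    print(queue.first())    # a
    print(queue.dequeue())  # a
    print(queue.dequeue())  # b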
| 644 | 0 |
"""simple docstring"""
import argparse
from t5x import checkpoints

from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM


def convert_tax_checkpoint_to_flax(tax_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    tax_model = checkpoints.load_t5x_checkpoint(tax_checkpoint_path)
    split_mlp_wi = 'wi_0' in tax_model['target']['encoder']['layers_0']['mlp']
if config.model_type == "t5":
snake_case_ : str = '''SelfAttention'''
if config.model_type == "longt5" and config.encoder_attention_type == "local":
snake_case_ : List[str] = '''LocalSelfAttention'''
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
snake_case_ : Dict = '''TransientGlobalSelfAttention'''
else:
raise ValueError(
            'Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5\'` with `encoder_attention_type`'
            ' attribute with a value from [\'local\', \'transient-global\'].' )
# Encoder
for layer_index in range(config.num_layers ):
snake_case_ : Any = f"layers_{str(_a )}"
# Self-Attention
snake_case_ : Dict = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel''']
snake_case_ : Tuple = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel''']
snake_case_ : Tuple = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel''']
snake_case_ : Optional[int] = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel''']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
snake_case_ : Tuple = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale''']
# Layer Normalization
snake_case_ : List[str] = tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale''']
if split_mlp_wi:
snake_case_ : int = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
snake_case_ : List[str] = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
snake_case_ : int = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
snake_case_ : str = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
snake_case_ : Optional[Any] = tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
        flax_model_encoder_layer_block = flax_model.params['encoder']['block'][str(layer_index)]['layer']
snake_case_ : List[str] = tax_attention_key
snake_case_ : Optional[Any] = tax_attention_out
snake_case_ : Any = tax_attention_query
snake_case_ : str = tax_attention_value
snake_case_ : Dict = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
snake_case_ : Union[str, Any] = tax_global_layer_norm
if split_mlp_wi:
snake_case_ : Any = tax_mlp_wi_a
snake_case_ : List[Any] = tax_mlp_wi_a
else:
snake_case_ : Union[str, Any] = tax_mlp_wi
snake_case_ : List[Any] = tax_mlp_wo
snake_case_ : int = tax_mlp_layer_norm
snake_case_ : Any = flax_model_encoder_layer_block
# Only for layer 0:
snake_case_ : Optional[int] = tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T
snake_case_ : Any = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
snake_case_ : List[str] = tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T
snake_case_ : Tuple = tax_encoder_global_rel_embedding
# Assigning
snake_case_ : Dict = tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale''']
snake_case_ : Any = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
snake_case_ : Tuple = f"layers_{str(_a )}"
# Self-Attention
snake_case_ : List[str] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel''']
snake_case_ : Optional[int] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel''']
snake_case_ : List[Any] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel''']
snake_case_ : Optional[int] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel''']
# Layer Normalization
snake_case_ : str = tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][
'''scale'''
]
# Encoder-Decoder-Attention
snake_case_ : Any = tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention''']
snake_case_ : Optional[Any] = tax_enc_dec_attention_module['''key''']['''kernel''']
snake_case_ : str = tax_enc_dec_attention_module['''out''']['''kernel''']
snake_case_ : Union[str, Any] = tax_enc_dec_attention_module['''query''']['''kernel''']
snake_case_ : List[str] = tax_enc_dec_attention_module['''value''']['''kernel''']
# Layer Normalization
snake_case_ : Optional[int] = tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale''']
# MLP
if split_mlp_wi:
snake_case_ : List[Any] = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
snake_case_ : List[str] = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
snake_case_ : Dict = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
snake_case_ : List[Any] = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
snake_case_ : List[Any] = tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
        flax_model_decoder_layer_block = flax_model.params['decoder']['block'][str(layer_index)]['layer']
snake_case_ : int = tax_attention_key
snake_case_ : List[Any] = tax_attention_out
snake_case_ : Any = tax_attention_query
snake_case_ : Dict = tax_attention_value
snake_case_ : str = tax_pre_attention_layer_norm
snake_case_ : Any = tax_enc_dec_attention_key
snake_case_ : str = tax_enc_dec_attention_out
snake_case_ : int = tax_enc_dec_attention_query
snake_case_ : Any = tax_enc_dec_attention_value
snake_case_ : Optional[Any] = tax_cross_layer_norm
if split_mlp_wi:
snake_case_ : Tuple = tax_mlp_wi_a
snake_case_ : List[Any] = tax_mlp_wi_a
else:
snake_case_ : List[Any] = tax_mlp_wi
snake_case_ : Dict = tax_mlp_wo
        snake_case_ : List[Any] = tax_mlp_layer_norm
snake_case_ : Optional[int] = flax_model_decoder_layer_block
# Decoder Normalization
snake_case_ : str = tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale''']
    snake_case_ : Tuple = tax_decoder_norm
# Only for layer 0:
snake_case_ : str = tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T
snake_case_ : Optional[Any] = tax_decoder_rel_embedding
# Token Embeddings
snake_case_ : Union[str, Any] = tax_model['''target''']['''token_embedder''']['''embedding''']
    snake_case_ : Optional[int] = tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
snake_case_ : Union[str, Any] = tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel''']
    flax_model.save_pretrained(flax_dump_folder_path)
    print('T5X Model was successfully converted!' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path the T5X checkpoint.'''
)
parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''')
parser.add_argument(
'''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.'''
)
    args = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
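
# Example invocation (hypothetical script name and paths; the flags match the
# argparse setup above):
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /path/to/t5x_checkpoint_dir \
#       --config_name google/long-t5-local-base \
#       --flax_dump_folder_path ./flax_model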
| 721 |
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    # Recurse if needed
    if "." in tensor_name:
        splits = tensor_name.split('''.''' )
        for split in splits[:-1]:
            new_module = getattr(module , split )
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}." )
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}." )
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module , tensor_name )

    if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}." )

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_4bit = False
        is_8bit = False
    else:
        is_4bit = hasattr(bnb.nn , '''Params4bit''' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Params4bit )
        is_8bit = isinstance(module._parameters[tensor_name] , bnb.nn.Int8Params )

    if is_4bit or is_8bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device )
            elif isinstance(value , torch.Tensor ):
                new_value = value.to('''cpu''' )
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse(
                        '''0.37.2''' )
                    if not is_8bit_serializable:
                        raise ValueError(
                            '''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '''
                            '''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' )
            else:
                new_value = torch.tensor(value , device='''cpu''' )

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls , Conv1D ) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value , requires_grad=False , **kwargs ).to(device )
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value , requires_grad=False , **kwargs ).to(device )

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight , '''SCB''' , fp16_statistics.to(device ) )

    else:
        if value is None:
            new_value = old_value.to(device )
        elif isinstance(value , torch.Tensor ):
            new_value = value.to(device )
        else:
            new_value = torch.tensor(value , device=device )

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value , requires_grad=old_value.requires_grad )
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False):
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name )

        if (isinstance(module , nn.Linear ) or isinstance(module , Conv1D )) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in '''.'''.join(current_key_name ) for key in modules_to_not_convert ):
                with init_empty_weights():
                    if isinstance(module , Conv1D ):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features , out_features , module.bias is not None , has_fp16_weights=quantization_config.llm_int8_has_fp16_weight , threshold=quantization_config.llm_int8_threshold , )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features , out_features , module.bias is not None , quantization_config.bnb_4bit_compute_dtype , compress_statistics=quantization_config.bnb_4bit_use_double_quant , quant_type=quantization_config.bnb_4bit_quant_type , )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module )
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False )
        if len(list(module.children() ) ) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module , modules_to_not_convert , current_key_name , quantization_config , has_been_replaced=has_been_replaced , )
        # Remove the last key for recursion
        current_key_name.pop(-1 )
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    modules_to_not_convert = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model , modules_to_not_convert , current_key_name , quantization_config )

    if not has_been_replaced:
        logger.warning(
            '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
            ''' Please double check your model architecture, or submit an issue on github if you think this is'''
            ''' a bug.''' )

    return model
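
# Usage sketch (assumes a loaded PyTorch model and a BitsAndBytesConfig, mirroring
# how this integration is normally driven from `from_pretrained`):
#   from transformers import BitsAndBytesConfig
#   quantization_config = BitsAndBytesConfig(load_in_8bit=True)
#   model = replace_with_bnb_linear(model, quantization_config=quantization_config)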
def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        '''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' , FutureWarning , )
    return replace_with_bnb_linear(*args , **kwargs )


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        '''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' , FutureWarning , )
    return set_module_quantized_tensor_to_device(*args , **kwargs )
def get_keys_to_not_convert(model):
    # Create a copy of the model and tie the weights, then check if it contains tied weights
    tied_model = deepcopy(model )  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model )
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params , dict ):
        tied_keys = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
    else:
        tied_keys = sum(tied_params , [] )
    has_tied_params = len(tied_keys ) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model , model.base_model_prefix )

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children() )
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module ) - set(tied_keys )
    list_untouched = list(set(tied_keys ) ) + list(intersection )

    # remove ".weight" from the keys
    names_to_remove = ['''.weight''', '''.bias''']
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove , '''''' )
        filtered_module_names.append(name )

    return filtered_module_names
| 485 | 0 |
class MaxFenwickTree:
    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                self.tree[index] = max(value, current_left_border, index)
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        right -= 1  # Because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
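
    # Minimal usage sketch (0-indexed; query treats `right` as exclusive):
    tree = MaxFenwickTree(5)
    tree.update(2, 10)
    tree.update(4, 7)
    print(tree.query(0, 5))  # 10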
| 518 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
a = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="
def _SCREAMING_SNAKE_CASE ( snake_case = "mumbai" ) -> Generator[tuple[str, str], None, None]:
_UpperCAmelCase = BeautifulSoup(requests.get(url + location ).content , """html.parser""" )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all("""div""" , attrs={"""data-tn-component""": """organicJob"""} ):
_UpperCAmelCase = job.find("""a""" , attrs={"""data-tn-element""": """jobTitle"""} ).text.strip()
_UpperCAmelCase = job.find("""span""" , {"""class""": """company"""} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("Bangalore"), 1):
print(F'Job {i:>2} is {job[0]} at {job[1]}')
| 518 | 1 |
'''simple docstring'''
def sum_of_digits(n: int) -> int:
    """Find the sum of digits of a number."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Find the sum of digits of a number using recursion."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Find the sum of digits of a number."""
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Benchmark the three implementations on ints of increasing length."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 179 |
'''simple docstring'''
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """
    returns the list containing all the possible
    combinations a string (target) can be constructed from
    the given list of substrings (word_bank)
    """
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]
if __name__ == "__main__":
print(all_construct("""jwajalapa""", ["""jwa""", """j""", """w""", """a""", """la""", """lapa"""]))
print(all_construct("""rajamati""", ["""s""", """raj""", """amat""", """raja""", """ma""", """i""", """t"""]))
print(
all_construct(
"""hexagonosaurus""",
["""h""", """ex""", """hex""", """ag""", """ago""", """ru""", """auru""", """rus""", """go""", """no""", """o""", """s"""],
)
)
| 179 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_mbart': ['MBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MBartConfig', 'MBartOnnxConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mbart'] = ['MBartTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mbart_fast'] = ['MBartTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mbart'] = [
'MBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'MBartForCausalLM',
'MBartForConditionalGeneration',
'MBartForQuestionAnswering',
'MBartForSequenceClassification',
'MBartModel',
'MBartPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_mbart'] = [
'TFMBartForConditionalGeneration',
'TFMBartModel',
'TFMBartPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_mbart'] = [
'FlaxMBartForConditionalGeneration',
'FlaxMBartForQuestionAnswering',
'FlaxMBartForSequenceClassification',
'FlaxMBartModel',
'FlaxMBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 570 |
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Return (base ** exponent) % modulo_value using recursive fast exponentiation."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, exponent: int = 1855, digits: int = 8) -> int:
    """Return the last `digits` digits of base tetrated `exponent` times."""
    # calculate base tetrated to exponent, keeping only the last 10**digits digits
    result = base
    for _ in range(1, exponent):
        result = _modexpt(base, result, 10**digits)

    return result
if __name__ == "__main__":
print(F'{solution() = }')
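
# Context note: with the defaults above, solution() computes the last 8 digits of the
# hyperexponentiation 1777↑↑1855 (Project Euler problem 188) by iterating modular
# exponentiation, since only the trailing digits of each tower level matter.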
| 570 | 1 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
| 27 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id, )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""head_mask""": head_mask,
}
return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length), dtype=torch.long, device=torch_device, )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices), dtype=torch.long, device=torch_device, )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
@slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 27 | 1 |
"""simple docstring"""
# fmt: off
MORSE_CODE_DICT = {
'''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
'''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
'''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
'''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
'''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
'''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
'''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
'''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)

    message = encrypt(message)
    print(message)

    message = decrypt(message)
    print(message)
if __name__ == "__main__":
main()
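
# Quick round-trip check (illustrative):
#   encrypt("SOS")          -> "... --- ..."
#   decrypt("... --- ...")  -> "SOS"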
| 83 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({'content': datasets.Value('string' )} ) , supervised_keys=None , )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'examples': get_test_dummy_examples()} )]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string' )} )} ) , supervised_keys=None , )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'examples': get_test_nested_examples()} )
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(['foo', 'bar', 'foobar'])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['foo', 'bar', 'foobar'])]
class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir , beam_runner='DirectRunner' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(_UpperCAmelCase , builder.name , 'default' , '0.0.0' , f"""{builder.name}-train.arrow""" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'content': datasets.Value('string' )} ) )
            dset = builder.as_dataset()
self.assertEqual(dset['train'].num_rows , _UpperCAmelCase )
self.assertEqual(dset['train'].info.splits['train'].num_examples , _UpperCAmelCase )
self.assertDictEqual(dset['train'][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset['train'][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(_UpperCAmelCase , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) )
del dset
    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir , beam_runner='DirectRunner' )
with patch('apache_beam.io.parquetio.WriteToParquet' ) as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
_UpperCAmelCase , builder.name , 'default' , '0.0.0' , f"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
_UpperCAmelCase , builder.name , 'default' , '0.0.0' , f"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'content': datasets.Value('string' )} ) )
            dset = builder.as_dataset()
self.assertEqual(dset['train'].num_rows , _UpperCAmelCase )
self.assertEqual(dset['train'].info.splits['train'].num_examples , _UpperCAmelCase )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset['train']['content'] ) , sorted(['foo', 'bar', 'foobar'] ) )
self.assertTrue(
os.path.exists(os.path.join(_UpperCAmelCase , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) )
del dset
@require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir , beam_runner='DirectRunner' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(_UpperCAmelCase , builder.name , 'default' , '0.0.0' , f"""{builder.name}-train.arrow""" ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string' )} )} ) )
            dset = builder.as_dataset()
self.assertEqual(dset['train'].num_rows , _UpperCAmelCase )
self.assertEqual(dset['train'].info.splits['train'].num_examples , _UpperCAmelCase )
self.assertDictEqual(dset['train'][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset['train'][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(_UpperCAmelCase , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) )
del dset
| 23 | 0 |
"""simple docstring"""
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', type=str, required=True)
parser.add_argument('--config_path', type=str, required=True)
parser.add_argument('--output_path', type=str, required=True)
    args = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
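
# Example invocation (hypothetical script name and paths; the flags match the
# argparse setup above):
#   python convert_ldm_original_checkpoint_to_diffusers.py \
#       --checkpoint_path ./ldm.ckpt --config_path ./ldm_config.yaml --output_path ./ldm_pipeline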
| 374 |
"""simple docstring"""
def euclidean_distance_sqr(point1, point2):
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda x: x[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    """Brute-force check over all pairs of points."""
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    """Closest-pair check restricted to the strip around the dividing line."""
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # base case: brute force small inputs
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid)
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y, points_sorted_on_y[mid:], points_counts - mid)
    closest_pair_dis = min(closest_in_left, closest_in_right)

    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis)
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5
if __name__ == "__main__":
lowercase__ :Optional[Any] = [(2, 3), (1_2, 3_0), (4_0, 5_0), (5, 1), (1_2, 1_0), (3, 4)]
print('Distance:', closest_pair_of_points(points, len(points)))
| 374 | 1 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size = 1,
        num_inference_steps = 100,
        generator = None,
        audio_length_in_s = None,
        return_dict = True,
    ):
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f'{audio_length_in_s} is too small. Make sure it\'s bigger or equal to'
                f' {3 * down_scale_factor / self.unet.config.sample_rate}.' )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f'{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'
                f' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'
                ' process.' )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f'You have passed a list of generators of length {len(generator)}, but requested an effective batch'
                f' size of {batch_size}. Make sure the batch size matches the length of the generators.' )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()

        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
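
# Usage sketch (assumes a pretrained checkpoint compatible with this pipeline):
#   pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#   output = pipe(num_inference_steps=100, audio_length_in_s=4.0)
#   audio = output.audios[0]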
| 626 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)
    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def test_encodings_from_sample_data(self):
        tokenizer = self.get_rust_tokenizer()

        INPUT_SENTENCES = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        TARGET_TOKENS = [[21_75, 2_37_14, 7_31_73, 14_42_52, 2], [77, 13_26_19, 34_78, 3_68, 10_95_86, 3_54_33, 2]]

        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)["input_ids"]
        self.assertListEqual(TARGET_TOKENS, computed_tokens)

        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(INPUT_SENTENCES, decoded_tokens)
    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)

                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")

                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length", )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length", )
    def test_encodings_from_xnli_dataset(self):
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)

        sample_data = next(iter(ds))["premise"]  # pick up one data
        input_text = list(sample_data.values())

        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)
    def test_pretrained_model_lists(self):
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 626 | 1 |
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
_UpperCAmelCase = """ \"\"\"
Output class for the scheduler's step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"\"\"
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
"""
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"), os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"), )
    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f'''\nclass {class_name}(nn.Module):\n''' + class_code
        if overwrite_result is not None:
            expected = comment + f'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
def lowerCamelCase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =check_copies.find_code_in_diffusers("""schedulers.scheduling_ddpm.DDPMSchedulerOutput""" )
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""" , """DDPMSchedulerOutput""" , REFERENCE_CODE + """\n""" , )
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""" , """DDPMSchedulerOutput""" , lowerCAmelCase , )
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""" , """TestSchedulerOutput""" , re.sub("""DDPM""" , """Test""" , lowerCAmelCase ) , )
# Copy consistency with a really long name
SCREAMING_SNAKE_CASE_: Dict ="""TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
f'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}''' , f'''{long_class_name}SchedulerOutput''' , re.sub("""Bert""" , lowerCAmelCase , lowerCAmelCase ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""" , """TestSchedulerOutput""" , lowerCAmelCase , overwrite_result=re.sub("""DDPM""" , """Test""" , lowerCAmelCase ) , )
| 36 |
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        image_size=224,
        num_labels=1000,
        layer_depths=[3, 3, 6, 4],
        embed_dims=[48, 56, 112, 220],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths, embed_dims=self.embed_dims, mlp_ratio=4, downsamples=[True, True, True, True], hidden_act="gelu", num_labels=self.num_labels, down_patch_size=3, down_stride=2, down_pad=1, drop_rate=0.0, drop_path_rate=0.0, use_layer_scale=True, layer_scale_init_value=1e-5,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=SwiftFormerConfig, has_text_modality=False, hidden_size=37, num_attention_heads=12, num_hidden_layers=12,
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="SwiftFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="SwiftFormer does not output attentions")
    def test_attention_outputs(self):
        pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)  # TODO

            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]
                    ),
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        def _config_zero_init(config):
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init

        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCamelCase__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
pass
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 36 | 1 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4_E00 and cp <= 0X9_FFF)
or (cp >= 0X3_400 and cp <= 0X4_DBF) #
or (cp >= 0X20_000 and cp <= 0X2A_6DF) #
or (cp >= 0X2A_700 and cp <= 0X2B_73F) #
or (cp >= 0X2B_740 and cp <= 0X2B_81F) #
or (cp >= 0X2B_820 and cp <= 0X2C_EAF) #
or (cp >= 0XF_900 and cp <= 0XF_AFF)
or (cp >= 0X2F_800 and cp <= 0X2F_A1F) #
): #
return True
return False
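# A couple of plain-Python sanity checks for the code-point predicate above: CJK
# Unified Ideographs are flagged, while ASCII, Hangul, and kana fall outside every
# listed range.
assert _is_chinese_char(ord("中"))
assert _is_chinese_char(ord("身"))
assert not _is_chinese_char(ord("A"))
assert not _is_chinese_char(ord("한"))  # modern Korean Hangul is a separate block
assert not _is_chinese_char(ord("ひ"))  # Hiragana as well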
def is_chinese(word):
    # word like '180' or '身高' or '神'
    for char in word:
        char = ord(char)
        if not _is_chinese_char(char):
            return 0
    return 1


def get_chinese_word(tokens):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens, chinese_word_set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
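# A small executable illustration of the greedy longest-match pass above: interior
# BERT tokens of an LTP word receive the "##" continuation prefix, which is exactly
# what whole-word masking consumes downstream.
_example = ["身", "高", "1", "8", "0"]
assert add_sub_symbol(_example, {"身高"}) == ["身", "##高", "1", "8", "0"]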
def prepare_ref(lines, ltp_tokenizer, bert_tokenizer):
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids
def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp', type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path'
)
parser.add_argument('--bert', type=str, default='./resources/robert', help='resources for Bert tokenizer')
parser.add_argument('--save_path', type=str, default='./resources/ref.txt', help='path to save res')
    args = parser.parse_args()
main(args)
| 526 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}


class Speech2TextTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        spm_file,
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        do_upper_case=False,
        do_lower_case=False,
        tgt_lang=None,
        lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token,
            do_upper_case=do_upper_case, do_lower_case=do_lower_case, tgt_lang=tgt_lang,
            lang_codes=lang_codes, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}

            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]

            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))


def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
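# A tiny standalone sketch of the mask layout get_special_tokens_mask produces above:
# one 1 per language-prefix token, 0 for each ordinary token, and a trailing 1 for EOS.
def _sketch_special_tokens_mask(prefix_len: int, seq_len: int) -> list:
    return [1] * prefix_len + [0] * seq_len + [1]


assert _sketch_special_tokens_mask(1, 3) == [1, 0, 0, 0, 1]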
| 526 | 1 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase=13 , __lowerCamelCase=7 , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=False , __lowerCamelCase=True , __lowerCamelCase=99 , __lowerCamelCase=32 , __lowerCamelCase=5 , __lowerCamelCase=4 , __lowerCamelCase=37 , __lowerCamelCase="gelu" , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=512 , __lowerCamelCase=16 , __lowerCamelCase=2 , __lowerCamelCase=0.02 , __lowerCamelCase=3 , __lowerCamelCase=4 , __lowerCamelCase=None , ):
'''simple docstring'''
__A : Any = parent
__A : List[Any] = batch_size
__A : Union[str, Any] = seq_length
__A : Union[str, Any] = is_training
__A : Any = use_input_mask
__A : List[Any] = use_token_type_ids
__A : int = use_labels
__A : Any = vocab_size
__A : Any = hidden_size
__A : Dict = num_hidden_layers
__A : Dict = num_attention_heads
__A : Union[str, Any] = intermediate_size
__A : int = hidden_act
__A : int = hidden_dropout_prob
__A : Union[str, Any] = attention_probs_dropout_prob
__A : Any = max_position_embeddings
__A : Any = type_vocab_size
__A : Optional[int] = type_sequence_label_size
__A : str = initializer_range
__A : int = num_labels
__A : int = num_choices
__A : Tuple = scope
def _a ( self ):
'''simple docstring'''
__A : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__A : Optional[int] = None
if self.use_input_mask:
__A : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
__A : str = None
if self.use_token_type_ids:
__A : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__A : Any = None
__A : List[str] = None
__A : Optional[int] = None
if self.use_labels:
__A : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__A : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__A : List[str] = ids_tensor([self.batch_size] , self.num_choices )
__A : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self ):
'''simple docstring'''
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask,
        )
        result = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask,
    ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
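# A toy illustration of the cache-equivalence property checked above: running the full
# sequence at once must match running a prefix and then feeding only the new tokens
# together with the cached state. The cumulative-sum "model" is a hypothetical
# stand-in, not a transformer.
import torch


def _toy_forward(x, past_sum=None):
    past = torch.zeros(x.shape[0], 1) if past_sum is None else past_sum
    hidden = past + torch.cumsum(x, dim=1)
    return hidden, hidden[:, -1:]  # (outputs, new "cache")


_x = torch.randn(2, 8)
_full, _ = _toy_forward(_x)
_, _cache = _toy_forward(_x[:, :5])
_cont, _ = _toy_forward(_x[:, 5:], past_sum=_cache)
assert torch.allclose(_full[:, 5:], _cont, atol=1e-5)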
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
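# A compact, hand-rolled sketch (not the transformers implementation) of the two RoPE
# scaling modes exercised above: "linear" rescales position indices unconditionally,
# while "dynamic" NTK-style scaling is a no-op until the input outgrows the original
# maximum length.
def _rope_scaled_positions(seq_len, max_position_embeddings, scaling_type, factor):
    positions = list(range(seq_len))
    if scaling_type == "linear":
        return [p / factor for p in positions]
    if scaling_type == "dynamic" and seq_len > max_position_embeddings:
        # shown as an equivalent position rescale purely for illustration
        return [p * max_position_embeddings / seq_len for p in positions]
    return positions


assert _rope_scaled_positions(10, 512, "dynamic", 10.0) == list(range(10))
assert _rope_scaled_positions(10, 512, "linear", 10.0)[-1] == 0.9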
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13bf_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-2, rtol=1e-2)

    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test"
    )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))

        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32
        )
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Model is currently gated")
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer's frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi"""
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf", device_map="sequential", use_safetensors=False
        )

        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
| 237 |
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def main() -> None:
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open' )
issue.remove_from_labels('stale' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
issue.add_to_labels('stale' )
if __name__ == "__main__":
main()
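# A small standard-library sketch of the inactivity arithmetic the loop above keys on:
# the stale notice fires after 23 days without updates, and the close happens once the
# bot's own notice has sat for more than 7 days.
from datetime import datetime, timedelta


def _days_since(now, then):
    return (now - then).days


_now = datetime(2023, 7, 1)
assert _days_since(_now, _now - timedelta(days=24)) > 23  # eligible for the stale label
assert _days_since(_now, _now - timedelta(days=8)) > 7    # eligible to be closed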
| 237 | 1 |
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
| 103 |
'''simple docstring'''
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0
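# A few doctest-style checks for the stack-based matcher above:
assert is_balanced("([]{})")
assert is_balanced("")
assert not is_balanced("([)]")  # interleaved pairs are rejected
assert not is_balanced("((")    # unclosed openers leave the stack non-empty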
def main():
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")
if __name__ == "__main__":
main()
| 310 | 0 |
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 709 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
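# A miniature stand-in showing the alias mechanics `attribute_map` provides (the real
# resolution lives in PretrainedConfig; the demo class below is hypothetical):
class _AliasDemo:
    attribute_map = {"hidden_size": "n_embd", "num_attention_heads": "n_head"}

    def __init__(self, n_embd, n_head):
        self.n_embd = n_embd
        self.n_head = n_head

    def __getattr__(self, name):
        if name in self.attribute_map:
            return getattr(self, self.attribute_map[name])
        raise AttributeError(name)


_demo = _AliasDemo(n_embd=768, n_head=12)
assert _demo.hidden_size == 768 and _demo.num_attention_heads == 12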
| 510 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
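# A miniature of the lazy pattern above (a hypothetical helper, not the transformers
# _LazyModule): attribute access triggers the real import, so optional dependencies
# only matter when a symbol is actually touched.
import importlib


class _LazyDemo:
    def __init__(self, mapping):
        self._mapping = mapping

    def __getattr__(self, name):
        module = importlib.import_module(self._mapping[name])
        return getattr(module, name)


assert _LazyDemo({"sqrt": "math"}).sqrt(9.0) == 3.0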
| 5 |
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    """
    Video classification pipeline using any `AutoModelForVideoClassification`.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "decord")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames

        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos: Union[str, List[str]], **kwargs):
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames

        if video.startswith("http://") or video.startswith("https://"):
            video = BytesIO(requests.get(video).content)

        videoreader = VideoReader(video)
        videoreader.seek(0)

        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)

        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)

        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 606 | 0 |
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class Wav2Vec2Processor(ProcessorMixin):
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )

            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)

            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
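# A usage sketch for the processor above (the checkpoint name and audio array are
# placeholder assumptions, not a tested recipe):
#
#     processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
#     batch = processor(audio=raw_audio, sampling_rate=16_000, text="HELLO", return_tensors="pt")
#
# Passing `text=` alongside `audio=` merges the tokenizer output into the feature batch
# as `labels`, which is the supported replacement for the deprecated
# as_target_processor() context manager.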
| 718 |
'''simple docstring'''
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])

        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    @require_torch
    def test_prepare_batch(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")

        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)

        result = list(batch.input_ids.numpy()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 9), batch.input_ids.shape)
        self.assertEqual((2, 9), batch.attention_mask.shape)
def __lowerCAmelCase ( self ) -> List[Any]:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def __lowerCAmelCase ( self ) -> Optional[Any]:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def __lowerCAmelCase ( self ) -> List[Any]:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" )
_a = tokenizer.encode("sequence builders" , add_special_tokens=snake_case_ )
_a = tokenizer.encode("multi-sequence build" , add_special_tokens=snake_case_ )
_a = tokenizer.build_inputs_with_special_tokens(snake_case_ )
_a = tokenizer.build_inputs_with_special_tokens(snake_case_ , snake_case_ )
assert encoded_sentence == text + [1_0_2]
assert encoded_pair == text + [1_0_2] + text_a + [1_0_2]
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'processing_layoutxlm': ['LayoutXLMProcessor']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm"] = ['LayoutXLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ['LayoutXLMTokenizerFast']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest (TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ : Dict = 5
# Realm tok
SCREAMING_SNAKE_CASE_ : Dict = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'test',
'question',
'this',
'is',
'the',
'first',
'second',
'third',
'fourth',
'fifth',
'record',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
SCREAMING_SNAKE_CASE_ : Tuple = os.path.join(self.tmpdirname , 'realm_tokenizer' )
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = os.path.join(lowerCAmelCase__ , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
SCREAMING_SNAKE_CASE_ : str = os.path.join(self.tmpdirname , 'realm_block_records' )
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'realm_tokenizer' ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = RealmConfig(num_block_records=self.num_block_records )
return config
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = Dataset.from_dict(
{
'id': ['0', '1'],
'question': ['foo', 'bar'],
'answers': [['Foo', 'Bar'], ['Bar']],
} )
return dataset
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = np.array(
[
b'This is the first record',
b'This is the second record',
b'This is the third record',
b'This is the fourth record',
b'This is the fifth record',
b'This is a longer longer longer record',
] , dtype=lowerCAmelCase__ , )
return block_records
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = self.get_config()
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_dummy_retriever()
SCREAMING_SNAKE_CASE_ : List[str] = retriever.tokenizer
SCREAMING_SNAKE_CASE_ : Any = np.array([0, 3] , dtype='long' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer(['Test question'] ).input_ids
SCREAMING_SNAKE_CASE_ : int = tokenizer(
['the fourth'] , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , ).input_ids
SCREAMING_SNAKE_CASE_ : Any = config.reader_seq_len
        has_answers , start_pos , end_pos , concat_inputs = retriever(
lowerCAmelCase__ , lowerCAmelCase__ , answer_ids=lowerCAmelCase__ , max_length=lowerCAmelCase__ , return_tensors='np' )
self.assertEqual(len(lowerCAmelCase__ ) , 2 )
self.assertEqual(len(lowerCAmelCase__ ) , 2 )
self.assertEqual(len(lowerCAmelCase__ ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 1_0) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 1_0) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 1_0) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 1_0) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'first', 'record', '[SEP]'] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'fourth', 'record', '[SEP]'] , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.get_config()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_dummy_retriever()
SCREAMING_SNAKE_CASE_ : Tuple = retriever.tokenizer
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.array([0, 3, 5] , dtype='long' )
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer(['Test question'] ).input_ids
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer(
['the fourth', 'longer longer'] , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , ).input_ids
SCREAMING_SNAKE_CASE_ : Union[str, Any] = config.reader_seq_len
        has_answers , start_pos , end_pos , concat_inputs = retriever(
lowerCAmelCase__ , lowerCAmelCase__ , answer_ids=lowerCAmelCase__ , max_length=lowerCAmelCase__ , return_tensors='np' )
self.assertEqual([False, True, True] , lowerCAmelCase__ )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , lowerCAmelCase__ )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , 'realm_block_records' ) )
# Test local path
SCREAMING_SNAKE_CASE_ : Optional[Any] = retriever.from_pretrained(os.path.join(self.tmpdirname , 'realm_block_records' ) )
self.assertEqual(retriever.block_records[0] , b'This is the first record' )
# Test mocked remote path
with patch('transformers.models.realm.retrieval_realm.hf_hub_download' ) as mock_hf_hub_download:
SCREAMING_SNAKE_CASE_ : Optional[int] = os.path.join(
os.path.join(self.tmpdirname , 'realm_block_records' ) , _REALM_BLOCK_RECORDS_FILENAME )
SCREAMING_SNAKE_CASE_ : Optional[Any] = RealmRetriever.from_pretrained('google/realm-cc-news-pretrained-openqa' )
self.assertEqual(retriever.block_records[0] , b'This is the first record' )
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--mobilebert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained MobileBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
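
# Example invocation (editor-added sketch; the script filename and all paths are
# illustrative placeholders, not values from this file):
#
#     python convert_mobilebert_tf_checkpoint.py \
#         --tf_checkpoint_path ./mobilebert/model.ckpt \
#         --mobilebert_config_file ./mobilebert/config.json \
#         --pytorch_dump_path ./mobilebert/pytorch_model.bin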
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    # Reverse the cipher mappings to decode
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input('Enter message to encode or decode: ').strip()
    key = input('Enter keyword: ').strip()
    option = input('Encipher or decipher? E/D:').strip()[0].lower()
    try:
        func = {'e': encipher, 'd': decipher}[option]
    except KeyError:
        raise KeyError('invalid input option')
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
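
# Round-trip sketch of the functions above (editor-added; the outputs shown are
# what the restored implementation produces for this key):
#
#     >>> cipher_map = create_cipher_map("Goodbye!!")
#     >>> encipher("Hello World!!", cipher_map)
#     'CYJJM VMQJB!!'
#     >>> decipher("CYJJM VMQJB!!", cipher_map)
#     'HELLO WORLD!!'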
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
def _lowerCAmelCase ( __magic_name__ :Tuple , __magic_name__ :Tuple=False , __magic_name__ :List[Any]=False , __magic_name__ :Optional[int]=False ):
UpperCAmelCase_ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''transformer.blocks.{i}.norm1.weight''', F'''vilt.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.norm1.bias''', F'''vilt.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''transformer.blocks.{i}.attn.proj.weight''', F'''vilt.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''transformer.blocks.{i}.attn.proj.bias''', F'''vilt.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''transformer.blocks.{i}.norm2.weight''', F'''vilt.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.norm2.bias''', F'''vilt.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(F'''transformer.blocks.{i}.mlp.fc1.weight''', F'''vilt.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.mlp.fc1.bias''', F'''vilt.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''transformer.blocks.{i}.mlp.fc2.weight''', F'''vilt.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.mlp.fc2.bias''', F'''vilt.encoder.layer.{i}.output.dense.bias''') )
# embeddings
rename_keys.extend(
[
# text embeddings
('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''),
(
'''text_embeddings.position_embeddings.weight''',
'''vilt.embeddings.text_embeddings.position_embeddings.weight''',
),
('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''),
(
'''text_embeddings.token_type_embeddings.weight''',
'''vilt.embeddings.text_embeddings.token_type_embeddings.weight''',
),
('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''),
('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''),
# patch embeddings
('''transformer.cls_token''', '''vilt.embeddings.cls_token'''),
('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''),
('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''),
('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''),
# token type embeddings
('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''),
] )
# final layernorm + pooler
rename_keys.extend(
[
('''transformer.norm.weight''', '''vilt.layernorm.weight'''),
('''transformer.norm.bias''', '''vilt.layernorm.bias'''),
('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''),
('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('''vqa_classifier.0.weight''', '''classifier.0.weight'''),
('''vqa_classifier.0.bias''', '''classifier.0.bias'''),
('''vqa_classifier.1.weight''', '''classifier.1.weight'''),
('''vqa_classifier.1.bias''', '''classifier.1.bias'''),
('''vqa_classifier.3.weight''', '''classifier.3.weight'''),
('''vqa_classifier.3.bias''', '''classifier.3.bias'''),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''),
('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''),
('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''),
('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''),
('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''),
('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''),
] )
else:
pass
return rename_keys
def _lowerCAmelCase ( __magic_name__ :Optional[int] , __magic_name__ :List[Any] ):
for i in range(config.num_hidden_layers ):
UpperCAmelCase_ = '''vilt.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase_ = state_dict.pop(F'''transformer.blocks.{i}.attn.qkv.weight''' )
UpperCAmelCase_ = state_dict.pop(F'''transformer.blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase_ = in_proj_weight[
: config.hidden_size, :
]
UpperCAmelCase_ = in_proj_bias[: config.hidden_size]
UpperCAmelCase_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase_ = in_proj_weight[
-config.hidden_size :, :
]
UpperCAmelCase_ = in_proj_bias[-config.hidden_size :]
def _lowerCAmelCase ( __magic_name__ :List[str] ):
UpperCAmelCase_ = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(__magic_name__ , __magic_name__ )
def _lowerCAmelCase ( __magic_name__ :Dict , __magic_name__ :Any , __magic_name__ :Optional[Any] ):
UpperCAmelCase_ = dct.pop(__magic_name__ )
UpperCAmelCase_ = val
@torch.no_grad()
def _lowerCAmelCase ( __magic_name__ :Any , __magic_name__ :List[Any] ):
UpperCAmelCase_ = ViltConfig(image_size=3_8_4 , patch_size=3_2 , tie_word_embeddings=__magic_name__ )
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
if "vqa" in checkpoint_url:
UpperCAmelCase_ = True
UpperCAmelCase_ = 3_1_2_9
UpperCAmelCase_ = '''huggingface/label-files'''
UpperCAmelCase_ = '''vqa2-id2label.json'''
UpperCAmelCase_ = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase_ = {int(__magic_name__ ): v for k, v in idalabel.items()}
UpperCAmelCase_ = idalabel
UpperCAmelCase_ = {v: k for k, v in idalabel.items()}
UpperCAmelCase_ = ViltForQuestionAnswering(__magic_name__ )
elif "nlvr" in checkpoint_url:
UpperCAmelCase_ = True
UpperCAmelCase_ = 2
UpperCAmelCase_ = {0: '''False''', 1: '''True'''}
UpperCAmelCase_ = {v: k for k, v in config.idalabel.items()}
UpperCAmelCase_ = 3
UpperCAmelCase_ = ViltForImagesAndTextClassification(__magic_name__ )
elif "irtr" in checkpoint_url:
UpperCAmelCase_ = True
UpperCAmelCase_ = ViltForImageAndTextRetrieval(__magic_name__ )
elif "mlm_itm" in checkpoint_url:
UpperCAmelCase_ = True
UpperCAmelCase_ = ViltForMaskedLM(__magic_name__ )
else:
raise ValueError('''Unknown model type''' )
# load state_dict of original model, remove and rename some keys
UpperCAmelCase_ = torch.hub.load_state_dict_from_url(__magic_name__ , map_location='''cpu''' )['''state_dict''']
UpperCAmelCase_ = create_rename_keys(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
for src, dest in rename_keys:
rename_key(__magic_name__ , __magic_name__ , __magic_name__ )
read_in_q_k_v(__magic_name__ , __magic_name__ )
if mlm_model or irtr_model:
UpperCAmelCase_ = ['''itm_score.fc.weight''', '''itm_score.fc.bias''']
for k in ignore_keys:
state_dict.pop(__magic_name__ , __magic_name__ )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
UpperCAmelCase_, UpperCAmelCase_ = model.load_state_dict(__magic_name__ , strict=__magic_name__ )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(__magic_name__ )
# Define processor
UpperCAmelCase_ = ViltImageProcessor(size=3_8_4 )
UpperCAmelCase_ = BertTokenizer.from_pretrained('''bert-base-uncased''' )
UpperCAmelCase_ = ViltProcessor(__magic_name__ , __magic_name__ )
# Forward pass on example inputs (image + text)
if nlvr_model:
UpperCAmelCase_ = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=__magic_name__ ).raw )
UpperCAmelCase_ = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=__magic_name__ ).raw )
UpperCAmelCase_ = (
'''The left image contains twice the number of dogs as the right image, and at least two dogs in total are'''
''' standing.'''
)
UpperCAmelCase_ = processor(__magic_name__ , __magic_name__ , return_tensors='''pt''' )
UpperCAmelCase_ = processor(__magic_name__ , __magic_name__ , return_tensors='''pt''' )
UpperCAmelCase_ = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
UpperCAmelCase_ = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''' , stream=__magic_name__ ).raw )
if mlm_model:
UpperCAmelCase_ = '''a bunch of [MASK] laying on a [MASK].'''
else:
UpperCAmelCase_ = '''How many cats are there?'''
UpperCAmelCase_ = processor(__magic_name__ , __magic_name__ , return_tensors='''pt''' )
UpperCAmelCase_ = model(**__magic_name__ )
# Verify outputs
if mlm_model:
UpperCAmelCase_ = torch.Size([1, 1_1, 3_0_5_2_2] )
UpperCAmelCase_ = torch.tensor([-1_2.5_0_6_1, -1_2.5_1_2_3, -1_2.5_1_7_4] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , __magic_name__ , atol=1e-4 )
# verify masked token prediction equals "cats"
UpperCAmelCase_ = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
UpperCAmelCase_ = torch.Size([1, 3_1_2_9] )
UpperCAmelCase_ = torch.tensor([-1_5.9_4_9_5, -1_8.1_4_7_2, -1_0.3_0_4_1] )
        # VQA logits are 2-D (batch, num_labels), so only shape and a first-row slice are checked
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1e-4 )
# verify vqa prediction equals "2"
UpperCAmelCase_ = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
UpperCAmelCase_ = torch.Size([1, 2] )
UpperCAmelCase_ = torch.tensor([-2.8_7_2_1, 2.1_2_9_1] )
assert torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1e-4 )
assert outputs.logits.shape == expected_shape
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
print(F'''Saving model and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(__magic_name__ )
processor.save_pretrained(__magic_name__ )
if __name__ == "__main__":
_lowerCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
_lowerCamelCase : Optional[Any] = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
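
# Example invocation (editor-added sketch; the script filename and dump folder are
# placeholders, the URL is the default defined above):
#
#     python convert_vilt_checkpoint.py \
#         --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
#         --pytorch_dump_folder_path ./vilt-b32-mlm-itm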
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_bloom': ['BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BloomConfig', 'BloomOnnxConfig'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bloom_fast"] = ['BloomTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
        'BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BloomForCausalLM',
        'BloomModel',
        'BloomPreTrainedModel',
        'BloomForSequenceClassification',
        'BloomForTokenClassification',
        'BloomForQuestionAnswering',
    ]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """
    Helper function parsing the command line options.
    """
    parser = ArgumentParser(
        description=(
            """PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
        ) )
    # Optional arguments for the launch helper
    parser.add_argument("""--num_cores""" , type=int , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
    # positional
    parser.add_argument(
        """training_script""" , type=str , help=(
            """The full path to the single TPU training """
            """program/script to be launched in parallel, """
            """followed by all the arguments for the """
            """training script"""
        ) , )
    # rest from the training program
    parser.add_argument("""training_script_args""" , nargs=REMAINDER )
    return parser.parse_args()


def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
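
# Typical invocation (editor-added sketch; assumes this module is saved as
# ``xla_spawn.py`` and that ``train.py`` defines the ``_mp_fn`` entry point
# spawned above):
#
#     python xla_spawn.py --num_cores 8 train.py --learning_rate 3e-5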
def multiply(a: int, b: int) -> int:
    """Multiply via additions and bit shifts over the set bits of ``b``."""
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def multiply_mod(a: int, b: int, c: int) -> int:
    """Same accumulation as ``multiply`` but reduced modulo ``c`` at every step."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
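
if __name__ == "__main__":
    # Small self-checks (editor-added illustration; the function names follow the
    # restored definitions above, the originals were not recoverable):
    # 7 == 0b111, so multiply(5, 7) accumulates 5 + 10 + 20 == 35, and
    # multiply_mod keeps every partial sum reduced modulo c.
    assert multiply(5, 7) == 35
    assert multiply_mod(5, 7, 6) == 35 % 6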
'''simple docstring'''
def kth_permutation(k, n):
    """
    Finds k'th lexicographic permutation (in increasing order) of
    0, 1, 2, ..., n-1 in O(n^2) time.
    """
    # Factorials from 1! to (n-1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))
    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])
    return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
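
# Worked examples for ``kth_permutation`` (editor-added; traced by hand against
# the implementation above):
#
#     kth_permutation(0, 5)   -> [0, 1, 2, 3, 4]   # k == 0 is the identity
#     kth_permutation(10, 4)  -> [1, 3, 0, 2]      # 10 == 1*3! + 2*2! + 0*1!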
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
_lowercase: List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name
def _lowerCamelCase ( snake_case ):
warnings.warn(
'The preprocess method is deprecated and will be removed in a future version. Please'
' use VaeImageProcessor.preprocess instead' , snake_case , )
if isinstance(snake_case , torch.Tensor ):
return image
elif isinstance(snake_case , PIL.Image.Image ):
_lowerCAmelCase = [image]
if isinstance(image[0] , PIL.Image.Image ):
_lowerCAmelCase , _lowerCAmelCase = image[0].size
_lowerCAmelCase , _lowerCAmelCase = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
_lowerCAmelCase = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
_lowerCAmelCase = np.concatenate(snake_case , axis=0 )
_lowerCAmelCase = np.array(snake_case ).astype(np.floataa ) / 2_55.0
_lowerCAmelCase = image.transpose(0 , 3 , 1 , 2 )
_lowerCAmelCase = 2.0 * image - 1.0
_lowerCAmelCase = torch.from_numpy(snake_case )
elif isinstance(image[0] , torch.Tensor ):
_lowerCAmelCase = torch.cat(snake_case , dim=0 )
return image
def _lowerCamelCase ( snake_case ):
if isinstance(snake_case , torch.Tensor ):
return mask
elif isinstance(snake_case , PIL.Image.Image ):
_lowerCAmelCase = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
_lowerCAmelCase , _lowerCAmelCase = mask[0].size
_lowerCAmelCase , _lowerCAmelCase = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
_lowerCAmelCase = [np.array(m.convert('L' ).resize((w, h) , resample=PIL_INTERPOLATION['nearest'] ) )[None, :] for m in mask]
_lowerCAmelCase = np.concatenate(snake_case , axis=0 )
_lowerCAmelCase = mask.astype(np.floataa ) / 2_55.0
_lowerCAmelCase = 0
_lowerCAmelCase = 1
_lowerCAmelCase = torch.from_numpy(snake_case )
elif isinstance(mask[0] , torch.Tensor ):
_lowerCAmelCase = torch.cat(snake_case , dim=0 )
return mask
class RePaintPipeline( DiffusionPipeline ):
    unet: UNetaDModel
    scheduler: RePaintScheduler
def __init__( self : int , lowercase__ : List[Any] , lowercase__ : Optional[Any] ):
super().__init__()
self.register_modules(unet=lowercase__ , scheduler=lowercase__ )
@torch.no_grad()
def __call__( self : Any , lowercase__ : Union[torch.Tensor, PIL.Image.Image] , lowercase__ : Union[torch.Tensor, PIL.Image.Image] , lowercase__ : int = 2_50 , lowercase__ : float = 0.0 , lowercase__ : int = 10 , lowercase__ : int = 10 , lowercase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase__ : Optional[str] = "pil" , lowercase__ : bool = True , ):
_lowerCAmelCase = image
_lowerCAmelCase = _preprocess_image(lowercase__ )
_lowerCAmelCase = original_image.to(device=self.device , dtype=self.unet.dtype )
_lowerCAmelCase = _preprocess_mask(lowercase__ )
_lowerCAmelCase = mask_image.to(device=self.device , dtype=self.unet.dtype )
_lowerCAmelCase = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(lowercase__ , lowercase__ ) and len(lowercase__ ) != batch_size:
raise ValueError(
f'You have passed a list of generators of length {len(lowercase__ )}, but requested an effective batch'
f' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
_lowerCAmelCase = original_image.shape
_lowerCAmelCase = randn_tensor(lowercase__ , generator=lowercase__ , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(lowercase__ , lowercase__ , lowercase__ , self.device )
_lowerCAmelCase = eta
_lowerCAmelCase = self.scheduler.timesteps[0] + 1
_lowerCAmelCase = generator[0] if isinstance(lowercase__ , lowercase__ ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
_lowerCAmelCase = self.unet(lowercase__ , lowercase__ ).sample
# compute previous image: x_t -> x_t-1
_lowerCAmelCase = self.scheduler.step(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
_lowerCAmelCase = self.scheduler.undo_step(lowercase__ , lowercase__ , lowercase__ )
_lowerCAmelCase = t
_lowerCAmelCase = (image / 2 + 0.5).clamp(0 , 1 )
_lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_lowerCAmelCase = self.numpy_to_pil(lowercase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase__ )
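
# Usage sketch for the inpainting pipeline above (editor-added; the checkpoint id
# and the ``image``/``mask`` variables are illustrative assumptions):
#
#     pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256")
#     out = pipe(image, mask, num_inference_steps=250, jump_length=10,
#                jump_n_sample=10, output_type="pil")
#     out.images[0].save("inpainted.png")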
'''simple docstring'''
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class ClassificationFunction(ExplicitEnum):
    SIGMOID = """sigmoid"""
    SOFTMAX = """softmax"""
    NONE = """none"""
@add_end_docstrings(
__magic_name__ , R"""
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `\"default\"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `\"sigmoid\"`: Applies the sigmoid function on the output.
- `\"softmax\"`: Applies the softmax function on the output.
- `\"none\"`: Does not apply any function on the output.
""" , )
class TextClassificationPipeline( Pipeline ):
_a = False
_a = ClassificationFunction.NONE
def __init__( self , **UpperCamelCase ) -> Union[str, Any]:
super().__init__(**UpperCamelCase )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
def UpperCamelCase__ ( self , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase="" , **UpperCamelCase ) -> int:
# Using "" as default argument because we're going to use `top_k=None` in user code to declare
# "No top_k"
__a = tokenizer_kwargs
__a = {}
if hasattr(self.model.config , 'return_all_scores' ) and return_all_scores is None:
__a = self.model.config.return_all_scores
if isinstance(UpperCamelCase , UpperCamelCase ) or top_k is None:
__a = top_k
__a = False
elif return_all_scores is not None:
warnings.warn(
'`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of'
' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.' , UpperCamelCase , )
if return_all_scores:
__a = None
else:
__a = 1
if isinstance(UpperCamelCase , UpperCamelCase ):
__a = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
__a = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self , *UpperCamelCase , **UpperCamelCase ) -> Dict:
__a = super().__call__(*UpperCamelCase , **UpperCamelCase )
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
__a = 'top_k' not in kwargs
if isinstance(args[0] , UpperCamelCase ) and _legacy:
# This pipeline is odd, and return a list when single item is run
return [result]
else:
return result
def UpperCamelCase__ ( self , UpperCamelCase , **UpperCamelCase ) -> Dict[str, GenericTensor]:
__a = self.framework
if isinstance(UpperCamelCase , UpperCamelCase ):
return self.tokenizer(**UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
elif isinstance(UpperCamelCase , UpperCamelCase ) and len(UpperCamelCase ) == 1 and isinstance(inputs[0] , UpperCamelCase ) and len(inputs[0] ) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=UpperCamelCase , **UpperCamelCase )
elif isinstance(UpperCamelCase , UpperCamelCase ):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
'The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'
' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.' )
return self.tokenizer(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
def UpperCamelCase__ ( self , UpperCamelCase ) -> List[Any]:
return self.model(**UpperCamelCase )
def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=1 , UpperCamelCase=True ) -> List[Any]:
# `_legacy` is used to determine if we're running the naked pipeline and in backward
# compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
# the more natural result containing the list.
# Default value before `set_parameters`
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
__a = ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
__a = ClassificationFunction.SOFTMAX
elif hasattr(self.model.config , 'function_to_apply' ) and function_to_apply is None:
__a = self.model.config.function_to_apply
else:
__a = ClassificationFunction.NONE
__a = model_outputs['logits'][0]
__a = outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
__a = sigmoid(UpperCamelCase )
elif function_to_apply == ClassificationFunction.SOFTMAX:
__a = softmax(UpperCamelCase )
elif function_to_apply == ClassificationFunction.NONE:
__a = outputs
else:
raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}" )
if top_k == 1 and _legacy:
return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
__a = [
{'label': self.model.config.idalabel[i], 'score': score.item()} for i, score in enumerate(UpperCamelCase )
]
if not _legacy:
            dict_scores.sort(key=lambda x : x["score"] , reverse=True )
if top_k is not None:
__a = dict_scores[:top_k]
return dict_scores
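
# Usage sketch (editor-added; goes through the generic ``pipeline`` factory, which
# is an assumption here since this module only defines the pipeline class):
#
#     from transformers import pipeline
#
#     classifier = pipeline("text-classification")
#     classifier("This movie was great!")             # [{'label': ..., 'score': ...}]
#     classifier("This movie was great!", top_k=None)  # one score per label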
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
UpperCAmelCase_ = False
class __lowercase ( unittest.TestCase ):
def UpperCamelCase__ ( self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase__ ( self ) -> Tuple:
return 12
@property
def UpperCamelCase__ ( self ) -> Optional[int]:
return 12
@property
def UpperCamelCase__ ( self ) -> List[str]:
return 32
@property
def UpperCamelCase__ ( self ) -> Optional[int]:
torch.manual_seed(0 )
__a = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def UpperCamelCase__ ( self ) -> Optional[int]:
__a = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def UpperCamelCase__ ( self ) -> Tuple:
torch.manual_seed(0 )
__a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(UpperCamelCase )
@property
def UpperCamelCase__ ( self ) -> Tuple:
torch.manual_seed(0 )
__a = 12
__a = 12
__a = {
'attention_bias': True,
'cross_attention_dim': 32,
'attention_head_dim': height * width,
'num_attention_heads': 1,
'num_vector_embeds': self.num_embed,
'num_embeds_ada_norm': self.num_embeds_ada_norm,
'norm_num_groups': 32,
'sample_size': width,
'activation_fn': 'geglu-approximate',
}
__a = TransformeraDModel(**UpperCamelCase )
return model
def UpperCamelCase__ ( self ) -> Optional[Any]:
__a = 'cpu'
__a = self.dummy_vqvae
__a = self.dummy_text_encoder
__a = self.dummy_tokenizer
__a = self.dummy_transformer
__a = VQDiffusionScheduler(self.num_embed )
__a = LearnedClassifierFreeSamplingEmbeddings(learnable=UpperCamelCase )
__a = VQDiffusionPipeline(
vqvae=UpperCamelCase , text_encoder=UpperCamelCase , tokenizer=UpperCamelCase , transformer=UpperCamelCase , scheduler=UpperCamelCase , learned_classifier_free_sampling_embeddings=UpperCamelCase , )
__a = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
__a = 'teddy bear playing in the pool'
__a = torch.Generator(device=UpperCamelCase ).manual_seed(0 )
__a = pipe([prompt] , generator=UpperCamelCase , num_inference_steps=2 , output_type='np' )
__a = output.images
__a = torch.Generator(device=UpperCamelCase ).manual_seed(0 )
__a = pipe(
[prompt] , generator=UpperCamelCase , output_type='np' , return_dict=UpperCamelCase , num_inference_steps=2 )[0]
__a = image[0, -3:, -3:, -1]
__a = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
__a = np.array([0.6_551, 0.6_168, 0.5_008, 0.5_676, 0.5_659, 0.4_295, 0.6_073, 0.5_599, 0.4_992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase__ ( self ) -> int:
__a = 'cpu'
__a = self.dummy_vqvae
__a = self.dummy_text_encoder
__a = self.dummy_tokenizer
__a = self.dummy_transformer
__a = VQDiffusionScheduler(self.num_embed )
__a = LearnedClassifierFreeSamplingEmbeddings(
learnable=UpperCamelCase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
__a = VQDiffusionPipeline(
vqvae=UpperCamelCase , text_encoder=UpperCamelCase , tokenizer=UpperCamelCase , transformer=UpperCamelCase , scheduler=UpperCamelCase , learned_classifier_free_sampling_embeddings=UpperCamelCase , )
__a = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
__a = 'teddy bear playing in the pool'
__a = torch.Generator(device=UpperCamelCase ).manual_seed(0 )
__a = pipe([prompt] , generator=UpperCamelCase , num_inference_steps=2 , output_type='np' )
__a = output.images
__a = torch.Generator(device=UpperCamelCase ).manual_seed(0 )
__a = pipe(
[prompt] , generator=UpperCamelCase , output_type='np' , return_dict=UpperCamelCase , num_inference_steps=2 )[0]
__a = image[0, -3:, -3:, -1]
__a = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
__a = np.array([0.6_693, 0.6_075, 0.4_959, 0.5_701, 0.5_583, 0.4_333, 0.6_171, 0.5_684, 0.4_988] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
def UpperCamelCase__ ( self ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ) -> Union[str, Any]:
__a = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' )
__a = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' )
__a = pipeline.to(UpperCamelCase )
pipeline.set_progress_bar_config(disable=UpperCamelCase )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
__a = torch.Generator(device=UpperCamelCase ).manual_seed(0 )
__a = pipeline(
'teddy bear playing in the pool' , num_images_per_prompt=1 , generator=UpperCamelCase , output_type='np' , )
__a = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@property
def A ( self ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
__lowercase = UNetaDModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def A ( self ) -> str:
'''simple docstring'''
__lowercase = self.dummy_uncond_unet
__lowercase = KarrasVeScheduler()
__lowercase = KarrasVePipeline(unet=snake_case_ , scheduler=snake_case_ )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
__lowercase = torch.manual_seed(0 )
__lowercase = pipe(num_inference_steps=2 , generator=snake_case_ , output_type='''numpy''' ).images
__lowercase = torch.manual_seed(0 )
__lowercase = pipe(num_inference_steps=2 , generator=snake_case_ , output_type='''numpy''' , return_dict=snake_case_ )[0]
__lowercase = image[0, -3:, -3:, -1]
__lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
__lowercase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def A ( self ) -> int:
'''simple docstring'''
__lowercase = '''google/ncsnpp-celebahq-256'''
__lowercase = UNetaDModel.from_pretrained(snake_case_ )
__lowercase = KarrasVeScheduler()
__lowercase = KarrasVePipeline(unet=snake_case_ , scheduler=snake_case_ )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
__lowercase = torch.manual_seed(0 )
__lowercase = pipe(num_inference_steps=2_0 , generator=snake_case_ , output_type='''numpy''' ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_5_6, 2_5_6, 3)
__lowercase = np.array([0.5_7_8, 0.5_8_1_1, 0.5_9_2_4, 0.5_8_0_9, 0.5_8_7, 0.5_8_8_6, 0.5_8_6_1, 0.5_8_0_2, 0.5_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__, UpperCamelCase__ = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-canny" , from_pt=snake_case , dtype=jnp.bfloataa )
UpperCamelCase__, UpperCamelCase__ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=snake_case , from_pt=snake_case , dtype=jnp.bfloataa )
UpperCamelCase__ = controlnet_params
UpperCamelCase__ = "bird"
UpperCamelCase__ = jax.device_count()
UpperCamelCase__ = pipe.prepare_text_inputs([prompts] * num_samples )
UpperCamelCase__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" )
UpperCamelCase__ = pipe.prepare_image_inputs([canny_image] * num_samples )
UpperCamelCase__ = jax.random.PRNGKey(0 )
UpperCamelCase__ = jax.random.split(snake_case , jax.device_count() )
UpperCamelCase__ = replicate(snake_case )
UpperCamelCase__ = shard(snake_case )
UpperCamelCase__ = shard(snake_case )
UpperCamelCase__ = pipe(
prompt_ids=snake_case , image=snake_case , params=snake_case , prng_seed=snake_case , num_inference_steps=50 , jit=snake_case , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
UpperCamelCase__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
UpperCamelCase__ = images[0, 253:256, 253:256, -1]
UpperCamelCase__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
UpperCamelCase__ = jnp.array(
[0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078] )
print(F'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__, UpperCamelCase__ = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-openpose" , from_pt=snake_case , dtype=jnp.bfloataa )
UpperCamelCase__, UpperCamelCase__ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=snake_case , from_pt=snake_case , dtype=jnp.bfloataa )
UpperCamelCase__ = controlnet_params
UpperCamelCase__ = "Chef in the kitchen"
UpperCamelCase__ = jax.device_count()
UpperCamelCase__ = pipe.prepare_text_inputs([prompts] * num_samples )
UpperCamelCase__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" )
UpperCamelCase__ = pipe.prepare_image_inputs([pose_image] * num_samples )
UpperCamelCase__ = jax.random.PRNGKey(0 )
UpperCamelCase__ = jax.random.split(snake_case , jax.device_count() )
UpperCamelCase__ = replicate(snake_case )
UpperCamelCase__ = shard(snake_case )
UpperCamelCase__ = shard(snake_case )
UpperCamelCase__ = pipe(
prompt_ids=snake_case , image=snake_case , params=snake_case , prng_seed=snake_case , num_inference_steps=50 , jit=snake_case , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
UpperCamelCase__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
UpperCamelCase__ = images[0, 253:256, 253:256, -1]
UpperCamelCase__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
UpperCamelCase__ = jnp.array(
[[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]] )
print(F'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print('\n'.join(str(row) for row in solutions))
    else:
        print('No solution exists!')
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True
            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
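
# Example (editor-added): 0 marks a free cell and 1 a blocked cell. For this grid
# the path (0,0) -> (1,0) -> (1,1) -> (2,1) -> (2,2) exists, so ``solve_maze``
# prints the solution matrix and returns True:
#
#     solve_maze([[0, 1, 0],
#                 [0, 0, 0],
#                 [1, 0, 0]])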
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint( ABC ):
def __init__( self) -> int:
# test for the above condition
self.test()
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = False
while not completed:
if counter == 1:
self.reset()
SCREAMING_SNAKE_CASE = self.advance()
if not self.does_advance(a):
raise Exception(
'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.')
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.update(a)
counter += 1
if counter > 1_0000:
raise Exception('update() does not fulfill the constraint.')
if self.remaining() != 0:
raise Exception('Custom Constraint is not defined correctly.')
@abstractmethod
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''')
@abstractmethod
def SCREAMING_SNAKE_CASE__ ( self , a) -> List[Any]:
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''')
@abstractmethod
def SCREAMING_SNAKE_CASE__ ( self , a) -> Dict:
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''')
@abstractmethod
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''')
@abstractmethod
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''')
@abstractmethod
def SCREAMING_SNAKE_CASE__ ( self , a=False) -> int:
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''')
class PhrasalConstraint( Constraint ):
def __init__( self , a) -> Dict:
super(a , self).__init__()
if not isinstance(a , a) or len(a) == 0:
raise ValueError(f'''`token_ids` has to be a non-empty list, but is {token_ids}.''')
if any((not isinstance(a , a) or token_id < 0) for token_id in token_ids):
raise ValueError(f'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''')
SCREAMING_SNAKE_CASE = token_ids
SCREAMING_SNAKE_CASE = len(self.token_ids)
SCREAMING_SNAKE_CASE = -1 # the index of the currently fulfilled step
SCREAMING_SNAKE_CASE = False
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def SCREAMING_SNAKE_CASE__ ( self , a) -> Any:
if not isinstance(a , a):
raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(a)}''')
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def SCREAMING_SNAKE_CASE__ ( self , a) -> Optional[int]:
if not isinstance(a , a):
raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(a)}''')
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
if self.does_advance(a):
self.fulfilled_idx += 1
SCREAMING_SNAKE_CASE = True
if self.fulfilled_idx == (self.seqlen - 1):
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = completed
else:
# failed to make progress.
SCREAMING_SNAKE_CASE = True
self.reset()
return stepped, completed, reset
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = 0
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
return self.seqlen - (self.fulfilled_idx + 1)
def SCREAMING_SNAKE_CASE__ ( self , a=False) -> int:
SCREAMING_SNAKE_CASE = PhrasalConstraint(self.token_ids)
if stateful:
SCREAMING_SNAKE_CASE = self.seqlen
SCREAMING_SNAKE_CASE = self.fulfilled_idx
SCREAMING_SNAKE_CASE = self.completed
return new_constraint
class DisjunctiveTrie:
    def __init__(self, nested_token_ids: List[List[int]], no_subsets=True) -> None:
        self.max_height = max([len(one) for one in nested_token_ids])

        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]

        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                'Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'
                f''' {nested_token_ids}.''')

        self.trie = root

    def next_tokens(self, current_seq):
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
class DisjunctiveConstraint(Constraint):
    def __init__(self, nested_token_ids: List[List[int]]) -> None:
        super(Constraint, self).__init__()

        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''')
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''')
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids):
            raise ValueError(
                f'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''')

        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids

        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}''')
        next_tokens = self.trie.next_tokens(self.current_seq)
        return token_id in next_tokens

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}''')

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()

        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed

        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)

        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed

        return new_constraint
class ConstraintListState:
    def __init__(self, constraints: List[Constraint]) -> None:
        self.constraints = constraints

        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False

        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids: Optional[List[int]]):
        self.init_state()

        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)

                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f'''`token_id` should be an `int`, but is `{token_id}`.''')

        complete, stepped = False, False

        if self.completed:
            complete = True
            stepped = False
            return complete, stepped

        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #     e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #     But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #     constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None

            if complete:
                # 2. If the next token completes the constraint, move it to the completed list and set
                #     inprogress to None. If there are no pending constraints either, then this full list of constraints
                #     is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None

                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True

        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our list
            # of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)

                    if not stepped:
                        raise Exception(
                            '`constraint.update(token_id)` is not yielding incremental progress, '
                            'even though `constraint.does_advance(token_id)` is true.')

                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None

                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint

                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )

                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True

                        break  # prevent accidentally stepping through multiple constraints with just one token.

        return complete, stepped

    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we never mutate the self.constraints objects
        # themselves during this process, so the copy starts from the initialization state.

        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]

        return new_state
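# Hedged usage sketch of the classes above: force the phrase [5, 9, 2] token by
# token; update() reports (stepped, completed, reset) at each step. The
# DisjunctiveConstraint is satisfied by any single branch of its trie.
if __name__ == "__main__":
    constraint = PhrasalConstraint([5, 9, 2])
    for token in [5, 9, 2]:
        stepped, completed, reset = constraint.update(token)
        assert stepped and not reset
    assert completed and constraint.remaining() == 0

    disjunctive = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
    assert sorted(disjunctive.trie.next_tokens([1, 2])) == [3, 4]  # both branches still open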
| 444 | 0 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
_lowerCamelCase = pytest.mark.integration
@require_faiss
class _snake_case (A__):
    def _create_dummy_dataset( self ):
        dset : Dataset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x ) for x in np.arange(30 ).tolist()]} )
        return dset

    def UpperCamelCase__ ( self ):
        import faiss

        dset : Dataset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex ,i : {"vecs": i * np.ones(5 ,dtype=np.float32 )} ,with_indices=True ,keep_in_memory=True )
        dset = dset.add_faiss_index("vecs" ,batch_size=1_00 ,metric_type=faiss.METRIC_INNER_PRODUCT )
        scores , examples = dset.get_nearest_examples("vecs" ,np.ones(5 ,dtype=np.float32 ) )
        self.assertEqual(examples["filename"][0] ,"my_name-train_29" )
        dset.drop_index("vecs" )
def UpperCamelCase__ ( self ):
import faiss
UpperCAmelCase_ : Optional[int] = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 ,1 ) ,index_name="vecs" ,batch_size=1_00 ,metric_type=faiss.METRIC_INNER_PRODUCT ,)
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = dset.get_nearest_examples("vecs" ,np.ones(5 ,dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] ,"my_name-train_29" )
def UpperCamelCase__ ( self ):
import faiss
UpperCAmelCase_ : Optional[int] = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 ,1 ) ,index_name="vecs" ,metric_type=faiss.METRIC_INNER_PRODUCT ,)
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False ) as tmp_file:
dset.save_faiss_index("vecs" ,tmp_file.name )
dset.load_faiss_index("vecs2" ,tmp_file.name )
os.unlink(tmp_file.name )
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = dset.get_nearest_examples("vecs2" ,np.ones(5 ,dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] ,"my_name-train_29" )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 ,1 ) ,index_name="vecs" )
dset.drop_index("vecs" )
self.assertRaises(__UpperCAmelCase ,partial(dset.get_nearest_examples ,"vecs2" ,np.ones(5 ,dtype=np.floataa ) ) )
def UpperCamelCase__ ( self ):
from elasticsearch import Elasticsearch
UpperCAmelCase_ : int = self._create_dummy_dataset()
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
UpperCAmelCase_ : Optional[Any] = {"acknowledged": True}
mocked_bulk.return_value([(True, None)] * 30 )
UpperCAmelCase_ : int = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
UpperCAmelCase_ : Optional[Any] = Elasticsearch()
dset.add_elasticsearch_index("filename" ,es_client=__UpperCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = dset.get_nearest_examples("filename" ,"my_name-train_29" )
self.assertEqual(examples["filename"][0] ,"my_name-train_29" )
@require_faiss
class _snake_case (A__):
def UpperCamelCase__ ( self ):
import faiss
UpperCAmelCase_ : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal ,5 )
index.add_vectors(np.zeros((5, 5) ,dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal ,10 )
# single query
UpperCAmelCase_ : Any = np.zeros(5 ,dtype=np.floataa )
UpperCAmelCase_ : Optional[int] = 1
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = index.search(__UpperCAmelCase )
self.assertRaises(__UpperCAmelCase ,index.search ,query.reshape(-1 ,1 ) )
self.assertGreater(scores[0] ,0 )
self.assertEqual(indices[0] ,1 )
# batched queries
UpperCAmelCase_ : List[Any] = np.eye(5 ,dtype=np.floataa )[::-1]
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = index.search_batch(__UpperCAmelCase )
self.assertRaises(__UpperCAmelCase ,index.search_batch ,queries[0] )
UpperCAmelCase_ : Optional[Any] = [scores[0] for scores in total_scores]
UpperCAmelCase_ : Union[str, Any] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__UpperCAmelCase ) ,0 )
self.assertListEqual([4, 3, 2, 1, 0] ,__UpperCAmelCase )
def UpperCamelCase__ ( self ):
import faiss
UpperCAmelCase_ : str = FaissIndex(string_factory="Flat" )
index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index ,faiss.IndexFlat )
UpperCAmelCase_ : Optional[Any] = FaissIndex(string_factory="LSH" )
index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index ,faiss.IndexLSH )
with self.assertRaises(__UpperCAmelCase ):
UpperCAmelCase_ : str = FaissIndex(string_factory="Flat" ,custom_index=faiss.IndexFlat(5 ) )
def UpperCamelCase__ ( self ):
import faiss
UpperCAmelCase_ : str = faiss.IndexFlat(5 )
UpperCAmelCase_ : int = FaissIndex(custom_index=__UpperCAmelCase )
index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index ,faiss.IndexFlat )
def UpperCamelCase__ ( self ):
import faiss
UpperCAmelCase_ : Tuple = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False ) as tmp_file:
index.save(tmp_file.name )
UpperCAmelCase_ : Optional[Any] = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
UpperCAmelCase_ : int = np.zeros(5 ,dtype=np.floataa )
UpperCAmelCase_ : Any = 1
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = index.search(__UpperCAmelCase )
self.assertGreater(scores[0] ,0 )
self.assertEqual(indices[0] ,1 )
@require_faiss
def a__ ( _SCREAMING_SNAKE_CASE : Tuple ) -> Tuple:
"""simple docstring"""
import faiss
UpperCAmelCase_ : List[Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
UpperCAmelCase_ : Optional[int] = "index.faiss"
UpperCAmelCase_ : List[Any] = F'''mock://{index_name}'''
index.save(snake_case__ , storage_options=mockfs.storage_options )
UpperCAmelCase_ : Optional[Any] = FaissIndex.load(snake_case__ , storage_options=mockfs.storage_options )
UpperCAmelCase_ : Tuple = np.zeros(5 , dtype=np.floataa )
UpperCAmelCase_ : Optional[Any] = 1
UpperCAmelCase_ , UpperCAmelCase_ : int = index.search(snake_case__ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class _snake_case (A__):
def UpperCamelCase__ ( self ):
from elasticsearch import Elasticsearch
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
UpperCAmelCase_ : Optional[int] = Elasticsearch()
UpperCAmelCase_ : Optional[Any] = {"acknowledged": True}
UpperCAmelCase_ : List[Any] = ElasticSearchIndex(es_client=__UpperCAmelCase )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(["foo", "bar", "foobar"] )
# single query
UpperCAmelCase_ : Dict = "foo"
UpperCAmelCase_ : Optional[Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = index.search(__UpperCAmelCase )
self.assertEqual(scores[0] ,1 )
self.assertEqual(indices[0] ,0 )
# single query with timeout
UpperCAmelCase_ : Optional[int] = "foo"
UpperCAmelCase_ : Optional[int] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = index.search(__UpperCAmelCase ,request_timeout=30 )
self.assertEqual(scores[0] ,1 )
self.assertEqual(indices[0] ,0 )
# batched queries
UpperCAmelCase_ : Optional[Any] = ["foo", "bar", "foobar"]
UpperCAmelCase_ : Any = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = index.search_batch(__UpperCAmelCase )
UpperCAmelCase_ : int = [scores[0] for scores in total_scores]
UpperCAmelCase_ : Tuple = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__UpperCAmelCase ) ,0 )
self.assertListEqual([1, 1, 1] ,__UpperCAmelCase )
# batched queries with timeout
UpperCAmelCase_ : Union[str, Any] = ["foo", "bar", "foobar"]
UpperCAmelCase_ : List[Any] = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
UpperCAmelCase_ , UpperCAmelCase_ : int = index.search_batch(__UpperCAmelCase ,request_timeout=30 )
UpperCAmelCase_ : Dict = [scores[0] for scores in total_scores]
UpperCAmelCase_ : Optional[Any] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__UpperCAmelCase ) ,0 )
self.assertListEqual([1, 1, 1] ,__UpperCAmelCase )
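# Hedged reference implementation of what the flat inner-product index in the
# tests above computes: exhaustive dot-product scoring, highest score first.
# It mirrors faiss.METRIC_INNER_PRODUCT semantics without requiring faiss.
import numpy as np

def _brute_force_ip_search(vectors, query, k=1):
    scores = vectors @ query                # inner product against every stored vector
    top = np.argsort(-scores)[:k]           # best-scoring indices first
    return scores[top], top

_vecs = np.eye(5, dtype=np.float32)
_scores, _ids = _brute_force_ip_search(_vecs, np.array([0, 1, 0, 0, 0], dtype=np.float32))
assert _ids[0] == 1 and _scores[0] == 1.0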
| 71 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
A_ : Optional[Any] = logging.getLogger(__name__)
@dataclass
class lowerCamelCase (A__ ):
lowerCamelCase__ : Optional[float] = field(
default=0.0 ,metadata={'help': 'The label smoothing epsilon to apply (if not zero).'} )
    lowerCamelCase__ : bool = field(default=A__ ,metadata={'help': 'Whether to use SortishSampler or not.'} )
lowerCamelCase__ : bool = field(
default=A__ ,metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
    lowerCamelCase__ : bool = field(default=A__ ,metadata={'help': 'Whether to use the Adafactor optimizer.'} )
lowerCamelCase__ : Optional[float] = field(
default=A__ ,metadata={'help': 'Encoder layer dropout probability. Goes into model.config.'} )
lowerCamelCase__ : Optional[float] = field(
default=A__ ,metadata={'help': 'Decoder layer dropout probability. Goes into model.config.'} )
lowerCamelCase__ : Optional[float] = field(default=A__ ,metadata={'help': 'Dropout probability. Goes into model.config.'} )
lowerCamelCase__ : Optional[float] = field(
default=A__ ,metadata={'help': 'Attention dropout probability. Goes into model.config.'} )
lowerCamelCase__ : Optional[str] = field(
default='linear' ,metadata={'help': f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"} ,)
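# Hedged illustration of the field(metadata={"help": ...}) pattern above: the help
# strings live in dataclass field metadata, which HfArgumentParser turns into CLI
# help text. _MiniArgs is a hypothetical stand-in, not part of this module.
from dataclasses import fields

@dataclass
class _MiniArgs:
    label_smoothing: float = field(default=0.0, metadata={'help': 'The label smoothing epsilon to apply.'})
    sortish_sampler: bool = field(default=False, metadata={'help': 'Whether to use SortishSampler or not.'})

for _f in fields(_MiniArgs):
    print(_f.name, '->', _f.metadata['help'])  # exactly the text a parser would surface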
| 196 | 0 |
'''simple docstring'''
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'pipelines_utils',
'0.22.0',
'Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.',
standard_warn=False,
stacklevel=3,
)
| 718 |
'''simple docstring'''
import argparse
import struct
import unittest
class SHAaaa :
'''simple docstring'''
def __init__( self ,_lowerCAmelCase ):
lowerCamelCase__ = data
# Initialize hash values
lowerCamelCase__ = [
0x6a_09_e6_67,
0xbb_67_ae_85,
0x3c_6e_f3_72,
0xa5_4f_f5_3a,
0x51_0e_52_7f,
0x9b_05_68_8c,
0x1f_83_d9_ab,
0x5b_e0_cd_19,
]
# Initialize round constants
lowerCamelCase__ = [
0x42_8a_2f_98,
0x71_37_44_91,
0xb5_c0_fb_cf,
0xe9_b5_db_a5,
0x39_56_c2_5b,
0x59_f1_11_f1,
0x92_3f_82_a4,
0xab_1c_5e_d5,
0xd8_07_aa_98,
0x12_83_5b_01,
0x24_31_85_be,
0x55_0c_7d_c3,
0x72_be_5d_74,
0x80_de_b1_fe,
0x9b_dc_06_a7,
0xc1_9b_f1_74,
0xe4_9b_69_c1,
0xef_be_47_86,
0x0f_c1_9d_c6,
0x24_0c_a1_cc,
0x2d_e9_2c_6f,
0x4a_74_84_aa,
0x5c_b0_a9_dc,
0x76_f9_88_da,
0x98_3e_51_52,
0xa8_31_c6_6d,
0xb0_03_27_c8,
0xbf_59_7f_c7,
0xc6_e0_0b_f3,
0xd5_a7_91_47,
0x06_ca_63_51,
0x14_29_29_67,
0x27_b7_0a_85,
0x2e_1b_21_38,
0x4d_2c_6d_fc,
0x53_38_0d_13,
0x65_0a_73_54,
0x76_6a_0a_bb,
0x81_c2_c9_2e,
0x92_72_2c_85,
0xa2_bf_e8_a1,
0xa8_1a_66_4b,
0xc2_4b_8b_70,
0xc7_6c_51_a3,
0xd1_92_e8_19,
0xd6_99_06_24,
0xf4_0e_35_85,
0x10_6a_a0_70,
0x19_a4_c1_16,
0x1e_37_6c_08,
0x27_48_77_4c,
0x34_b0_bc_b5,
0x39_1c_0c_b3,
0x4e_d8_aa_4a,
0x5b_9c_ca_4f,
0x68_2e_6f_f3,
0x74_8f_82_ee,
0x78_a5_63_6f,
0x84_c8_78_14,
0x8c_c7_02_08,
0x90_be_ff_fa,
0xa4_50_6c_eb,
0xbe_f9_a3_f7,
0xc6_71_78_f2,
]
lowerCamelCase__ = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def UpperCamelCase_ ( _lowerCAmelCase ):
lowerCamelCase__ = B"""\x80""" + (B"""\x00""" * (63 - (len(_lowerCAmelCase ) + 8) % 64))
lowerCamelCase__ = struct.pack(""">Q""" ,(len(_lowerCAmelCase ) * 8) )
return data + padding + big_endian_integer
def UpperCamelCase_ ( self ):
# Convert into blocks of 64 bytes
lowerCamelCase__ = [
self.preprocessed_data[x : x + 64]
for x in range(0 ,len(self.preprocessed_data ) ,64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
lowerCamelCase__ = list(struct.unpack(""">16L""" ,_lowerCAmelCase ) )
# add 48 0-ed integers
words += [0] * 48
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self.hashes
for index in range(0 ,64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
lowerCamelCase__ = (
self.ror(words[index - 15] ,7 )
^ self.ror(words[index - 15] ,18 )
^ (words[index - 15] >> 3)
)
lowerCamelCase__ = (
self.ror(words[index - 2] ,17 )
^ self.ror(words[index - 2] ,19 )
^ (words[index - 2] >> 10)
)
lowerCamelCase__ = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x1_00_00_00_00
# Compression
lowerCamelCase__ = self.ror(_lowerCAmelCase ,6 ) ^ self.ror(_lowerCAmelCase ,11 ) ^ self.ror(_lowerCAmelCase ,25 )
lowerCamelCase__ = (e & f) ^ ((~e & 0xff_ff_ff_ff) & g)
lowerCamelCase__ = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x1_00_00_00_00
lowerCamelCase__ = self.ror(_lowerCAmelCase ,2 ) ^ self.ror(_lowerCAmelCase ,13 ) ^ self.ror(_lowerCAmelCase ,22 )
lowerCamelCase__ = (a & b) ^ (a & c) ^ (b & c)
lowerCamelCase__ = (sa + maj) % 0x1_00_00_00_00
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = (
g,
f,
e,
((d + tempa) % 0x1_00_00_00_00),
c,
b,
a,
((tempa + tempa) % 0x1_00_00_00_00),
)
lowerCamelCase__ = [a, b, c, d, e, f, g, h]
# Modify final values
lowerCamelCase__ = [
((element + mutated_hash_values[index]) % 0x1_00_00_00_00)
for index, element in enumerate(self.hashes )
]
lowerCamelCase__ = """""".join([hex(_lowerCAmelCase )[2:].zfill(8 ) for value in self.hashes] )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ):
return 0xff_ff_ff_ff & (value << (32 - rotations)) | (value >> rotations)
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
    def UpperCamelCase_ ( self ):
        import hashlib

        msg = bytes("""Test String""" ,"""utf-8""" )
        self.assertEqual(SHAaaa(msg ).hash ,hashlib.sha256(msg ).hexdigest() )
def A__ ( ):
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """-s""" , """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , )
    parser.add_argument(
        """-f""" , """--file""" , dest="""input_file""" , help="""Hash contents of a file""" )
    args = parser.parse_args()
    hash_input = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , """rb""" ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(hash_input , """utf-8""" )
    print(SHAaaa(hash_input ).hash )
if __name__ == "__main__":
main()
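# Hedged standalone check of the preprocessing rule above: SHA-256 pads with 0x80,
# zero bytes, then the 8-byte big-endian bit length, so every padded message is a
# whole number of 64-byte blocks.
def _sha256_pad(data: bytes) -> bytes:
    padding = b"\x80" + b"\x00" * (63 - (len(data) + 8) % 64)
    big_endian_bit_length = struct.pack(">Q", len(data) * 8)
    return data + padding + big_endian_bit_length

for _n in range(130):
    assert len(_sha256_pad(b"a" * _n)) % 64 == 0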
| 9 | 0 |
"""simple docstring"""
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
_lowerCAmelCase :Optional[Any] = datasets.utils.logging.get_logger(__name__)
class _UpperCAmelCase ( folder_based_builder.FolderBasedBuilderConfig ):
'''simple docstring'''
a__ =None
a__ =None
class _UpperCAmelCase ( folder_based_builder.FolderBasedBuilder ):
'''simple docstring'''
a__ =datasets.Audio()
a__ ="""audio"""
a__ =AudioFolderConfig
a__ =42 # definition at the bottom of the script
a__ =AudioClassification(audio_column='''audio''' ,label_column='''label''' )
_lowerCAmelCase :Optional[Any] = [
'.aiff',
'.au',
'.avr',
'.caf',
'.flac',
'.htk',
'.svx',
'.mat4',
'.mat5',
'.mpc2k',
'.ogg',
'.paf',
'.pvf',
'.raw',
'.rf64',
'.sd2',
'.sds',
'.ircam',
'.voc',
'.w64',
'.wav',
'.nist',
'.wavex',
'.wve',
'.xi',
'.mp3',
'.opus',
]
_lowerCAmelCase :Union[str, Any] = AUDIO_EXTENSIONS
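# Hedged usage sketch: with the builder above registered under "audiofolder", an
# audio-classification dataset laid out as  data/<label>/<clip>.wav  loads through
# the standard datasets entry point (the directory path here is illustrative):
#
#   from datasets import load_dataset
#   ds = load_dataset("audiofolder", data_dir="path/to/data")
#   example = ds["train"][0]
#   print(example["audio"], example["label"])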
| 506 |
"""simple docstring"""
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
_lowercase = logging.get_logger(__name__)
class __a ( __a ):
'''simple docstring'''
def __init__( self , **_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ["bs4"] )
super().__init__(**_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase ) -> Tuple:
'''simple docstring'''
__lowercase = []
__lowercase = []
__lowercase = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
__lowercase = parent.find_all(child.name , recursive=_lowerCamelCase )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(_lowerCamelCase ) else next(i for i, s in enumerate(_lowerCamelCase , 1 ) if s is child ) )
__lowercase = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = BeautifulSoup(_lowerCamelCase , "html.parser" )
__lowercase = []
__lowercase = []
__lowercase = []
for element in html_code.descendants:
if type(_lowerCamelCase ) == bsa.element.NavigableString:
if type(element.parent ) != bsa.element.Tag:
continue
__lowercase = html.unescape(_lowerCamelCase ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(_lowerCamelCase )
__lowercase , __lowercase = self.xpath_soup(_lowerCamelCase )
stringaxtag_seq.append(_lowerCamelCase )
stringaxsubs_seq.append(_lowerCamelCase )
if len(_lowerCamelCase ) != len(_lowerCamelCase ):
raise ValueError("Number of doc strings and xtags does not correspond" )
if len(_lowerCamelCase ) != len(_lowerCamelCase ):
raise ValueError("Number of doc strings and xsubs does not correspond" )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase , _lowerCamelCase ) -> Tuple:
'''simple docstring'''
__lowercase = ""
for tagname, subs in zip(_lowerCamelCase , _lowerCamelCase ):
xpath += f'''/{tagname}'''
if subs != 0:
xpath += f'''[{subs}]'''
return xpath
def __call__( self , _lowerCamelCase ) -> BatchFeature:
'''simple docstring'''
__lowercase = False
# Check that strings has a valid type
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__lowercase = True
elif isinstance(_lowerCamelCase , (list, tuple) ):
if len(_lowerCamelCase ) == 0 or isinstance(html_strings[0] , _lowerCamelCase ):
__lowercase = True
if not valid_strings:
raise ValueError(
"HTML strings must of type `str`, `List[str]` (batch of examples), "
f'''but is of type {type(_lowerCamelCase )}.''' )
__lowercase = bool(isinstance(_lowerCamelCase , (list, tuple) ) and (isinstance(html_strings[0] , _lowerCamelCase )) )
if not is_batched:
__lowercase = [html_strings]
# Get nodes + xpaths
__lowercase = []
__lowercase = []
for html_string in html_strings:
__lowercase , __lowercase , __lowercase = self.get_three_from_single(_lowerCamelCase )
nodes.append(_lowerCamelCase )
__lowercase = []
for node, tag_list, sub_list in zip(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
__lowercase = self.construct_xpath(_lowerCamelCase , _lowerCamelCase )
xpath_strings.append(_lowerCamelCase )
xpaths.append(_lowerCamelCase )
# return as Dict
__lowercase = {"nodes": nodes, "xpaths": xpaths}
__lowercase = BatchFeature(data=_lowerCamelCase , tensor_type=_lowerCamelCase )
return encoded_inputs
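# Hedged standalone version of the xpath_soup logic above: walk a node's parents,
# numbering same-name siblings to build positional XPath segments (requires bs4).
from bs4 import BeautifulSoup as _BS

def _xpath_for(element):
    parts = []
    child = element if element.name else element.parent
    for parent in child.parents:
        siblings = parent.find_all(child.name, recursive=False)
        if len(siblings) == 1:
            parts.append(child.name)
        else:
            position = next(i for i, s in enumerate(siblings, 1) if s is child)
            parts.append(f"{child.name}[{position}]")
        child = parent
    return "/" + "/".join(reversed(parts))

_soup = _BS("<html><body><p>a</p><p>b</p></body></html>", "html.parser")
assert _xpath_for(_soup.find_all("p")[1]) == "/html/body/p[2]"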
| 118 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
'salesforce/blip2-opt-2.7b': 'https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json',
}
class BlipaVisionConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'blip_2_vision_model'

    def __init__( self , hidden_size=1408 , intermediate_size=6144 , num_hidden_layers=39 , num_attention_heads=16 , image_size=224 , patch_size=14 , hidden_act="gelu" , layer_norm_eps=0.00001 , attention_dropout=0.0 , initializer_range=1e-10 , qkv_bias=True , **A_ , ) -> None:
"""simple docstring"""
super().__init__(**A_ )
_lowerCamelCase = hidden_size
_lowerCamelCase = intermediate_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = patch_size
_lowerCamelCase = image_size
_lowerCamelCase = initializer_range
_lowerCamelCase = attention_dropout
_lowerCamelCase = layer_norm_eps
_lowerCamelCase = hidden_act
_lowerCamelCase = qkv_bias
@classmethod
def UpperCamelCase_ ( cls , A_ , **A_ ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(A_ )
_lowerCamelCase , _lowerCamelCase = cls.get_config_dict(A_ , **A_ )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('''model_type''' ) == "blip-2":
_lowerCamelCase = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A_ , **A_ )
class BlipaQFormerConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'blip_2_qformer'

    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , cross_attention_frequency=2 , encoder_hidden_size=1408 , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , **kwargs )
_lowerCamelCase = vocab_size
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = hidden_act
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = initializer_range
_lowerCamelCase = layer_norm_eps
_lowerCamelCase = position_embedding_type
_lowerCamelCase = cross_attention_frequency
_lowerCamelCase = encoder_hidden_size
@classmethod
def UpperCamelCase_ ( cls , A_ , **A_ ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(A_ )
_lowerCamelCase , _lowerCamelCase = cls.get_config_dict(A_ , **A_ )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('''model_type''' ) == "blip-2":
_lowerCamelCase = config_dict['''qformer_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A_ , **A_ )
class BlipaConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'blip-2'
    is_composition = True

    def __init__( self , vision_config=None , qformer_config=None , text_config=None , num_query_tokens=32 , **kwargs ) -> None:
        """simple docstring"""
        super().__init__(**kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info('''vision_config is None. Initializing the Blip2VisionConfig with default values.''' )

        if qformer_config is None:
            qformer_config = {}
            logger.info('''qformer_config is None. Initializing the Blip2QFormerConfig with default values.''' )

        if text_config is None:
            text_config = {}
            logger.info('''text_config is None. Initializing the text config with default values (`OPTConfig`).''' )

        self.vision_config = BlipaVisionConfig(**vision_config )
        self.qformer_config = BlipaQFormerConfig(**qformer_config )
        text_model_type = text_config['''model_type'''] if '''model_type''' in text_config else '''opt'''
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config )

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
@classmethod
    def from_vision_qformer_text_configs( cls , vision_config , qformer_config , text_config , **kwargs , ) -> "BlipaConfig":
        """simple docstring"""
        return cls(
            vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **kwargs , )
    def to_dict( self ) -> dict:
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output['''vision_config'''] = self.vision_config.to_dict()
        output['''qformer_config'''] = self.qformer_config.to_dict()
        output['''text_config'''] = self.text_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
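# Hedged usage sketch of the composite config restored above (needs transformers
# installed for PretrainedConfig/CONFIG_MAPPING): build it from the three
# sub-configs and round-trip through to_dict().
#
#   config = BlipaConfig.from_vision_qformer_text_configs(
#       BlipaVisionConfig() , BlipaQFormerConfig() , CONFIG_MAPPING["opt"]() )
#   assert config.num_query_tokens == 32              # default from __init__ above
#   assert config.to_dict()["model_type"] == "blip-2"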
| 720 |
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
snake_case__ = logging.get_logger(__name__)
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
def __init__( self , *A_ , **A_ ) -> None:
"""simple docstring"""
warnings.warn(
'''The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DPTImageProcessor instead.''' , A_ , )
super().__init__(*A_ , **A_ )
| 638 | 0 |
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class lowercase ( _A):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = 42
UpperCAmelCase : Any = None
def betas_for_alpha_bar( num_diffusion_timesteps , max_beta=0.999 , alpha_transform_type="cosine" , ) -> torch.Tensor:
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )

    else:
        raise ValueError(F'''Unsupported alpha_transform_type: {alpha_transform_type}''' )

    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
class lowercase ( _A , _A):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = 1
@register_to_config
def __init__( self : Dict , snake_case : int = 1000 , snake_case : float = 0.0001 , snake_case : float = 0.02 , snake_case : str = "linear" , snake_case : Optional[Union[np.ndarray, List[float]]] = None , snake_case : bool = True , snake_case : bool = True , snake_case : int = 0 , snake_case : str = "epsilon" , snake_case : float = 1.0 , **snake_case : str , ):
'''simple docstring'''
if kwargs.get('set_alpha_to_one' , __lowerCamelCase ) is not None:
SCREAMING_SNAKE_CASE : List[Any] = (
'The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'
)
deprecate('set_alpha_to_one' , '1.0.0' , __lowerCamelCase , standard_warn=__lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs['set_alpha_to_one']
if trained_betas is not None:
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(__lowerCamelCase , dtype=torch.floataa )
elif beta_schedule == "linear":
SCREAMING_SNAKE_CASE : Any = torch.linspace(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
SCREAMING_SNAKE_CASE : Union[str, Any] = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , __lowerCamelCase , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
SCREAMING_SNAKE_CASE : Optional[int] = betas_for_alpha_bar(__lowerCamelCase )
else:
            raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' )
SCREAMING_SNAKE_CASE : int = 1.0 - self.betas
SCREAMING_SNAKE_CASE : str = torch.cumprod(self.alphas , dim=0 )
# At every step in inverted ddim, we are looking into the next alphas_cumprod
# For the final step, there is no next alphas_cumprod, and the index is out of bounds
# `set_alpha_to_zero` decides whether we set this parameter simply to zero
# in this case, self.step() just output the predicted noise
# or whether we use the final alpha of the "non-previous" one.
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
SCREAMING_SNAKE_CASE : List[str] = 1.0
# setable values
SCREAMING_SNAKE_CASE : str = None
SCREAMING_SNAKE_CASE : List[str] = torch.from_numpy(np.arange(0 , __lowerCamelCase ).copy().astype(np.intaa ) )
def lowerCamelCase_ ( self : Any , snake_case : torch.FloatTensor , snake_case : Optional[int] = None ):
'''simple docstring'''
return sample
def lowerCamelCase_ ( self : str , snake_case : int , snake_case : Union[str, torch.device] = None ):
'''simple docstring'''
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.num_train_timesteps`:'''
                f''' {self.config.num_train_timesteps}, as the unet model trained with this scheduler can handle at most'''
                f''' {self.config.num_train_timesteps} timesteps.''' )
SCREAMING_SNAKE_CASE : Dict = num_inference_steps
SCREAMING_SNAKE_CASE : Any = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
SCREAMING_SNAKE_CASE : List[str] = (np.arange(0 , __lowerCamelCase ) * step_ratio).round().copy().astype(np.intaa )
SCREAMING_SNAKE_CASE : Any = torch.from_numpy(__lowerCamelCase ).to(__lowerCamelCase )
self.timesteps += self.config.steps_offset
def lowerCamelCase_ ( self : int , snake_case : torch.FloatTensor , snake_case : int , snake_case : torch.FloatTensor , snake_case : float = 0.0 , snake_case : bool = False , snake_case : Optional[torch.FloatTensor] = None , snake_case : bool = True , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
SCREAMING_SNAKE_CASE : Optional[Any] = self.alphas_cumprod[timestep]
SCREAMING_SNAKE_CASE : Union[str, Any] = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
SCREAMING_SNAKE_CASE : Optional[int] = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
SCREAMING_SNAKE_CASE : Optional[int] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
SCREAMING_SNAKE_CASE : str = model_output
elif self.config.prediction_type == "sample":
SCREAMING_SNAKE_CASE : int = model_output
SCREAMING_SNAKE_CASE : Optional[Any] = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
SCREAMING_SNAKE_CASE : Tuple = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
SCREAMING_SNAKE_CASE : int = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'''
' `v_prediction`' )
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
SCREAMING_SNAKE_CASE : Tuple = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
SCREAMING_SNAKE_CASE : Optional[int] = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
SCREAMING_SNAKE_CASE : Tuple = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=__lowerCamelCase , pred_original_sample=__lowerCamelCase )
def __len__( self : Optional[int] ):
'''simple docstring'''
return self.config.num_train_timesteps
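# Hedged numeric check of the "predicted x_0" branch in step() above: with the
# true noise, x0_hat = (x_t - sqrt(1 - a_t) * eps) / sqrt(a_t) recovers the
# original sample exactly.
_rng = np.random.default_rng(0)
_x0 = _rng.standard_normal(4)
_eps = _rng.standard_normal(4)
_alpha_prod_t = 0.7
_x_t = _alpha_prod_t**0.5 * _x0 + (1 - _alpha_prod_t) ** 0.5 * _eps  # forward noising
_x0_hat = (_x_t - (1 - _alpha_prod_t) ** 0.5 * _eps) / _alpha_prod_t**0.5
assert np.allclose(_x0_hat, _x0)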
| 352 |
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = "▁"
A_ = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
A_ = {
"sentencepiece_model_file": "sentencepiece.bpe.model",
"vocab_file": "vocab.txt",
}
A_ = {
"vocab_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
},
"sentencepiece_model_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
},
}
A_ = {
"ernie-m-base": 514,
"ernie-m-large": 514,
}
A_ = {
"ernie-m-base": {"do_lower_case": False},
"ernie-m-large": {"do_lower_case": False},
}
class __lowercase ( _A ):
lowercase = ["input_ids"]
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_INIT_CONFIGURATION
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = RESOURCE_FILES_NAMES
def __init__( self : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : str=False , __lowerCamelCase : int="utf8" , __lowerCamelCase : Union[str, Any]="[UNK]" , __lowerCamelCase : Tuple="[SEP]" , __lowerCamelCase : Union[str, Any]="[PAD]" , __lowerCamelCase : str="[CLS]" , __lowerCamelCase : List[Any]="[MASK]" , __lowerCamelCase : Optional[Dict[str, Any]] = None , **__lowerCamelCase : Optional[int] , ) -> None:
'''simple docstring'''
lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , vocab_file=__lowerCamelCase , encoding=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , )
lowercase = do_lower_case
lowercase = sentencepiece_model_ckpt
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowerCamelCase )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
lowercase = self.load_vocab(filepath=__lowerCamelCase )
else:
lowercase = {self.sp_model.id_to_piece(__lowerCamelCase ): id for id in range(self.sp_model.get_piece_size() )}
lowercase = {v: k for k, v in self.vocab.items()}
def __a ( self : int , __lowerCamelCase : List[str] ) -> str:
'''simple docstring'''
if text is None:
return None
lowercase = self.tokenize(__lowerCamelCase )
lowercase ,lowercase = '''''', []
for i, ch in enumerate(__lowerCamelCase ):
if ch in self.SP_CHAR_MAPPING:
lowercase = self.SP_CHAR_MAPPING.get(__lowerCamelCase )
else:
lowercase = unicodedata.normalize('''NFKC''' , __lowerCamelCase )
if self.is_whitespace(__lowerCamelCase ):
continue
normalized_text += ch
char_mapping.extend([i] * len(__lowerCamelCase ) )
lowercase ,lowercase ,lowercase = normalized_text, [], 0
if self.do_lower_case:
lowercase = text.lower()
for token in split_tokens:
if token[:1] == "▁":
lowercase = token[1:]
lowercase = text[offset:].index(__lowerCamelCase ) + offset
lowercase = start + len(__lowerCamelCase )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
lowercase = end
return token_mapping
@property
def __a ( self : str ) -> Tuple:
'''simple docstring'''
return len(self.vocab )
def __a ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self : Tuple ) -> Dict:
'''simple docstring'''
lowercase = self.__dict__.copy()
lowercase = None
return state
def __setstate__( self : int , __lowerCamelCase : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
lowercase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase = {}
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def __a ( self : Any , __lowerCamelCase : Dict ) -> Optional[int]:
'''simple docstring'''
return "".join((self.SP_CHAR_MAPPING.get(__lowerCamelCase , __lowerCamelCase ) for c in text) )
def __a ( self : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Optional[Any]=64 , __lowerCamelCase : Any=0.1 ) -> int:
'''simple docstring'''
if self.sp_model_kwargs.get('''enable_sampling''' ) is True:
lowercase = True
if self.sp_model_kwargs.get('''alpha''' ) is not None:
lowercase = self.sp_model_kwargs.get('''alpha''' )
if self.sp_model_kwargs.get('''nbest_size''' ) is not None:
lowercase = self.sp_model_kwargs.get('''nbest_size''' )
if not enable_sampling:
lowercase = self.sp_model.EncodeAsPieces(__lowerCamelCase )
else:
lowercase = self.sp_model.SampleEncodeAsPieces(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
lowercase = []
for pi, piece in enumerate(__lowerCamelCase ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(__lowerCamelCase ) and pi != 0:
new_pieces.append(__lowerCamelCase )
continue
else:
continue
lowercase = 0
for i, chunk in enumerate(__lowerCamelCase ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(__lowerCamelCase ) or self.is_punct(__lowerCamelCase ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(__lowerCamelCase )
lowercase = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowercase = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowercase = i
if len(__lowerCamelCase ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def __a ( self : str , __lowerCamelCase : Tuple ) -> Optional[int]:
'''simple docstring'''
lowercase = ''''''.join(__lowerCamelCase ).replace(__lowerCamelCase , ''' ''' ).strip()
return out_string
def __a ( self : List[str] , __lowerCamelCase : Optional[Any] ) -> str:
'''simple docstring'''
lowercase = self.convert_ids_to_tokens(__lowerCamelCase )
lowercase = ''''''.join(__lowerCamelCase ).replace(__lowerCamelCase , ''' ''' ).strip()
return out_string
def __a ( self : List[Any] , __lowerCamelCase : List[str] ) -> Dict:
'''simple docstring'''
return self.vocab.get(__lowerCamelCase , self.vocab.get(self.unk_token ) )
def __a ( self : int , __lowerCamelCase : Optional[Any] ) -> int:
'''simple docstring'''
return self.reverse_vocab.get(__lowerCamelCase , self.unk_token )
def __a ( self : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any]=None ) -> str:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase = [self.cls_token_id]
lowercase = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def __a ( self : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : int=None ) -> Any:
'''simple docstring'''
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def __a ( self : str , __lowerCamelCase : List[Any] , __lowerCamelCase : int=None , __lowerCamelCase : Optional[int]=False ) -> Optional[int]:
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1]
def __a ( self : Tuple , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
# [CLS] X [SEP]
return (len(__lowerCamelCase ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(__lowerCamelCase ) + 1) + [1] * (len(__lowerCamelCase ) + 3)
def __a ( self : Union[str, Any] , __lowerCamelCase : Dict ) -> str:
'''simple docstring'''
if "\u4e00" <= char <= "\u9fff":
return True
return False
def __a ( self : Tuple , __lowerCamelCase : Union[str, Any] ) -> List[str]:
'''simple docstring'''
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def __a ( self : Dict , __lowerCamelCase : str ) -> Optional[Any]:
'''simple docstring'''
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def __a ( self : Optional[Any] , __lowerCamelCase : Tuple ) -> Optional[Any]:
'''simple docstring'''
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(__lowerCamelCase ) == 1:
lowercase = unicodedata.category(__lowerCamelCase )
if cat == "Zs":
return True
return False
def __a ( self : Tuple , __lowerCamelCase : Optional[Any] ) -> Any:
'''simple docstring'''
lowercase = {}
with io.open(__lowerCamelCase , '''r''' , encoding='''utf-8''' ) as f:
for index, line in enumerate(__lowerCamelCase ):
lowercase = line.rstrip('''\n''' )
lowercase = int(__lowerCamelCase )
return token_to_idx
def __a ( self : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
lowercase = 0
if os.path.isdir(__lowerCamelCase ):
lowercase = os.path.join(
__lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
lowercase = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
with open(__lowerCamelCase , '''w''' , encoding='''utf-8''' ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
''' Please check that the vocabulary is not corrupted!''' )
lowercase = token_index
writer.write(token + '''\n''' )
index += 1
lowercase = os.path.join(__lowerCamelCase , '''sentencepiece.bpe.model''' )
with open(__lowerCamelCase , '''wb''' ) as fi:
lowercase = self.sp_model.serialized_model_proto()
fi.write(__lowerCamelCase )
return (vocab_file,)
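# Hedged standalone check of the special-token layout built above:
# single sequence -> [CLS] A [SEP]; pair -> [CLS] A [SEP] [SEP] B [SEP],
# with token_type_ids covering the first len(A) + 1 positions with 0s and the
# remaining len(B) + 3 positions with 1s. The ids below are illustrative.
_CLS, _SEP = 0, 2

def _build_pair(a, b):
    return [_CLS] + a + [_SEP, _SEP] + b + [_SEP]

_ids = _build_pair([11, 12], [21])
_type_ids = [0] * (len([11, 12]) + 1) + [1] * (len([21]) + 3)
assert _ids == [_CLS, 11, 12, _SEP, _SEP, 21, _SEP]
assert len(_type_ids) == len(_ids)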
| 604 | 0 |
'''simple docstring'''
from __future__ import annotations
def __lowerCamelCase ( _UpperCamelCase : list[int] ):
'''simple docstring'''
return len(set(_UpperCamelCase ) ) == len(_UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
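# Hedged usage note: the helper above returns True exactly when every element is
# distinct (i.e. the list contains no duplicates), since a set drops repeats.
assert (len(set([1, 2, 3])) == len([1, 2, 3])) is True   # all unique
assert (len(set([1, 2, 2])) == len([1, 2, 2])) is False  # one duplicate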
| 43 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Any ) ->Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self : str ) ->List[str]:
UpperCAmelCase_ = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
UpperCAmelCase_ = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
sd_pipe.set_scheduler('''sample_euler''' )
UpperCAmelCase_ = '''A painting of a squirrel eating a burger'''
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = sd_pipe([prompt] , generator=UpperCAmelCase__ , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
UpperCAmelCase_ = output.images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCAmelCase__ ( self : List[str] ) ->int:
UpperCAmelCase_ = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
UpperCAmelCase_ = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
sd_pipe.set_scheduler('''sample_euler''' )
UpperCAmelCase_ = '''A painting of a squirrel eating a burger'''
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = sd_pipe([prompt] , generator=UpperCAmelCase__ , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
UpperCAmelCase_ = output.images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1
def lowerCAmelCase__ ( self : List[Any] ) ->Optional[int]:
UpperCAmelCase_ = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
UpperCAmelCase_ = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
sd_pipe.set_scheduler('''sample_dpmpp_2m''' )
UpperCAmelCase_ = '''A painting of a squirrel eating a burger'''
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = sd_pipe(
[prompt] , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=15 , output_type='''np''' , use_karras_sigmas=UpperCAmelCase__ , )
UpperCAmelCase_ = output.images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ = np.array(
[0.1138_1689, 0.1211_2921, 0.138_9457, 0.1254_9606, 0.124_4964, 0.1083_1517, 0.1156_2866, 0.1086_7816, 0.1049_9048] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
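# Hedged note on the pattern above: a freshly seeded generator makes each pipeline
# call reproducible, which is what allows comparing outputs against the fixed
# expected_slice arrays. Illustrative only, assuming `pipe` is a loaded pipeline:
#
#   out_a = pipe(prompt, generator=torch.manual_seed(0), output_type='''np''').images
#   out_b = pipe(prompt, generator=torch.manual_seed(0), output_type='''np''').images
#   assert np.abs(out_a - out_b).max() == 0  # same seed is expected to give identical images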
| 43 | 1 |
"""simple docstring"""
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
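# Because pure-comment lines and blank lines are dropped before hashing,
# cosmetically different sources hash identically. Illustrative example:
#     _hash_python_lines(["x = 1", "# a comment", "y = 2"]) == _hash_python_lines(["x = 1", "y = 2"])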
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
'''csv''': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'''json''': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'''pandas''': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'''parquet''': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'''arrow''': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'''text''': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'''imagefolder''': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'''audiofolder''': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
'''.csv''': ('''csv''', {}),
'''.tsv''': ('''csv''', {'''sep''': '''\t'''}),
'''.json''': ('''json''', {}),
'''.jsonl''': ('''json''', {}),
'''.parquet''': ('''parquet''', {}),
'''.arrow''': ('''arrow''', {}),
'''.txt''': ('''text''', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('''.zip''')
_MODULE_TO_EXTENSIONS["audiofolder"].append('''.zip''')
| 549 |
"""simple docstring"""
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        hidden_dropout=0.0,
        attention_dropout=0.1,
        weight_tying=True,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXJapaneseModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"
        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]

        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)

        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
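# Note on test_decoder_model_past_large_inputs above: it verifies that
# incremental decoding with `past_key_values` reproduces the hidden states of
# a full forward pass over the concatenated sequence, comparing one random
# channel slice to keep the assertion cheap.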
| 337 | 0 |
"""simple docstring"""
def solution(n: int = 10) -> str:
    """Return the last ``n`` digits of the non-Mersenne prime 28433 * 2**7830457 + 1."""
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * pow(2, 7830457, modulus) + 1
    return str(number % modulus)
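# pow(2, 7830457, modulus) performs modular exponentiation, so only the last
# `n` digits are ever materialized; the full value 2**7830457 would have
# roughly 2.36 million decimal digits.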
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(10) = }""")
| 632 |
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,  # Start of GPT2 config args
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)

    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)

    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam(
        self,
        input_ids=None,
        input_embeds=None,
        device=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
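# Design note on generate_beam above: beams are ranked by length-normalized
# cumulative log-probability (scores_sum / seq_lengths); finished beams are
# frozen by forcing all of their continuation probability onto a single token
# with log-prob 0, and decoding stops once every beam has emitted
# `eos_token_id` or `entry_length` steps have elapsed.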
| 632 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
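# Worked example for get_expected_values: a 640x480 image with
# shortest_edge=18 resizes to height 18, width 24 (w > h), preserving the
# aspect ratio; in batched mode the expected shape is the per-batch maximum
# height and width, mirroring the processor's padding behavior.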
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 81 |
def cocktail_shaker_sort(unsorted: list) -> list:
    """
    Sort a list in place using cocktail shaker sort and return it.

    >>> cocktail_shaker_sort([4, 5, 2, 1, 2])
    [1, 2, 2, 4, 5]
    >>> cocktail_shaker_sort([-4, 5, 0, 1, 2, 11])
    [-4, 0, 1, 2, 5, 11]
    """
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        # backward pass: bubble the smallest remaining element to the left
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        # forward pass: bubble the largest remaining element to the right
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted
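# Cocktail shaker sort is a bidirectional bubble sort: each outer pass moves
# the smallest remaining element left and the largest right, so the `swapped`
# early exit gives O(n) best case on already-sorted input and O(n^2) worst case.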
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
| 521 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}
class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff

        self.num_sparse_encoder_layers = num_sparse_encoder_layers

        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layers we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layers we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
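# Sparse-step example: with num_layers=12 and num_sparse_encoder_layers=3,
# encoder_sparse_step = 12 // 3 = 4, i.e. every 4th encoder block is a sparse
# (mixture-of-experts) layer; a sparse-layer count of 0 yields a fully dense stack.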
| 705 |
"""simple docstring"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
logger = logging.getLogger(__name__)


def git_log(folder_path: str):
    """
    Log commit info.
    """
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }

    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
    """
    Handle single- and multi-GPU / multi-node setups.
    """
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID        : %i" % params.node_id)
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank)
    logger.info(PREFIX + "World size     : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master         : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )
def set_seed(args):
    """
    Set the random seed.
    """
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 487 | 0 |