import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch

import numpy as np
from datasets import Dataset

from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer


class RealmRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Write a small WordPiece vocabulary for the REALM tokenizer.
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "test",
            "question",
            "this",
            "is",
            "the",
            "first",
            "second",
            "third",
            "fourth",
            "fifth",
            "record",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)

    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            }
        )
        return dataset

    def get_dummy_block_records(self):
        block_records = np.array(
            [
                b"This is the first record",
                b"This is the second record",
                b"This is the third record",
                b"This is the fourth record",
                b"This is the fifth record",
                b"This is a longer longer longer record",
            ],
            dtype=object,
        )
        return block_records

    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever

    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"],
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"],
        )

    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, _ = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)

    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")

        self.assertEqual(retriever.block_records[0], b"This is the first record")
import numpy


class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        # Input values provided for training the model.
        self.input_array = input_array

        # Random initial weights are assigned where the first argument is the
        # number of nodes in the previous layer and the second argument is the
        # number of nodes in the next layer.

        # input_array.shape[1] is the number of nodes in the input layer;
        # the first hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )

        # The first hidden layer has 4 nodes; the second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)

        # The second hidden layer has 3 nodes; the output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network; initially all zeros.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        # Layer connecting the input layer with the first hidden set of nodes.
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )
        # Layer connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        # Layer connecting the second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        # Gradient of the loss with respect to each weight matrix, propagated
        # backwards from the output layer to the input layer.
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        # Input values for which the output is to be predicted.
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    return (value) * (1 - (value))


def example() -> int:
    # Input values.
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )

    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)

    # Calling training function.
    # Set give_loss to True if you want to see the loss at every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)

    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))


if __name__ == "__main__":
    example()
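
# A minimal usage sketch of the class above with loss reporting enabled
# (give_loss=True). The weights start random and only a few iterations run,
# so the printed losses and the final prediction vary from run to run; this
# is illustrative only, not part of the original example.
def example_with_loss() -> int:
    inputs = numpy.array(([0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]), dtype=numpy.float64)
    targets = numpy.array(([0], [0], [0], [1]), dtype=numpy.float64)
    network = TwoHiddenLayerNeuralNetwork(input_array=inputs, output_array=targets)
    network.train(output=targets, iterations=10, give_loss=True)
    return network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))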
import argparse
import os
import shutil

import torch

from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer


def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pruning_method",
        choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
        type=str,
        required=True,
        help=(
            "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
            " sigmoied_threshold = Soft movement pruning)"
        ),
    )
    parser.add_argument(
        "--threshold",
        type=float,
        required=False,
        help=(
            "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
            "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
            "Not needed for `l0`"
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        required=True,
        help="Folder containing the model that was previously fine-pruned",
    )
    parser.add_argument(
        "--target_model_path",
        default=None,
        type=str,
        required=False,
        help="Folder containing the model that was previously fine-pruned",
    )
    args = parser.parse_args()

    main(args)
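
# A typical invocation, sketched from the argument parser above (the script
# name, path, and threshold value below are placeholders, not values taken
# from this file):
#
#   python bertarize.py \
#       --pruning_method topK \
#       --threshold 0.10 \
#       --model_name_or_path ./serialization_dir/fine_pruned_model
#
# With topK pruning, each weight matrix keeps roughly the `threshold` fraction
# of its highest-scoring entries; the binarized mask is folded into the dense
# tensor, so the saved `pytorch_model.bin` loads like a regular checkpoint.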
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]

if TYPE_CHECKING:
    from .processing_layoutxlm import LayoutXLMProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))


def gaussian_error_linear_unit(vector: np.ndarray) -> np.ndarray:
    # Sigmoid approximation of the GELU activation: x * sigmoid(1.702 * x).
    return vector * sigmoid(1.702 * vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
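
# Quick numerical check of the approximation above (values are rounded
# evaluations of the formula, not outputs copied from the original file):
#
#     gaussian_error_linear_unit(np.array([-3.0, 0.0, 1.0, 3.0]))
#     # -> roughly [-0.0181, 0.0, 0.8458, 2.9819]
#
# For large |x| the activation approaches 0 (negative side) or x (positive
# side), and gelu(0) == 0 exactly, since sigmoid(0) == 0.5.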
import functools
import gc
import inspect

import torch

from .imports import is_npu_available, is_xpu_available


def release_memory(*objects):
    # Drop references to the given objects, then force a garbage-collection
    # pass and clear the relevant device cache.
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception: Exception) -> bool:
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error: the decorator supplies the batch size itself.
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
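
# Usage sketch for the decorator above: the wrapped function must take the
# batch size as its first parameter, which the decorator supplies itself and
# halves on every OOM until a run succeeds. The training internals and the
# `build_dataloader` helper below are hypothetical placeholders.
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def train(batch_size, model, optimizer):
#         dataloader = build_dataloader(batch_size)  # hypothetical helper
#         for batch in dataloader:
#             ...
#
#     train(model, optimizer)  # note: no batch_size argument at the call site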
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "andreasmadsen/efficient_mlm_m0.40": (
        "https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
    ),
}


class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
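
# Usage sketch: like other `PretrainedConfig` subclasses, the config can be
# built from keyword overrides, with unspecified fields keeping the defaults
# above. The model class used here is the matching one from transformers.
#
#     from transformers import RobertaPreLayerNormConfig, RobertaPreLayerNormModel
#
#     config = RobertaPreLayerNormConfig(num_hidden_layers=6)
#     model = RobertaPreLayerNormModel(config)  # randomly initialised weights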
import unittest

from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_torch_available():
    import torch

    from transformers import (
        MraForMaskedLM,
        MraForMultipleChoice,
        MraForQuestionAnswering,
        MraForSequenceClassification,
        MraForTokenClassification,
        MraModel,
    )
    from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST


class MraModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_parallelizable_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return


@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
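
# Usage sketch, reusing the checkpoint the integration tests load above:
#
#     import torch
#     from transformers import MraModel
#
#     model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
#     input_ids = torch.arange(256).unsqueeze(0)
#     with torch.no_grad():
#         hidden_states = model(input_ids).last_hidden_state  # shape (1, 256, 768)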
import json
import logging
import os
import sys
from pathlib import Path

import finetune_rag

from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
    TestCasePlus,
    execute_subprocess_async,
    require_ray,
    require_torch_gpu,
    require_torch_multi_gpu,
)


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)

    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)

        testargs = f"""
            --data_dir {data_dir} \
            --output_dir {output_dir} \
            --model_name_or_path facebook/rag-sequence-base \
            --model_type rag_sequence \
            --do_train \
            --do_predict \
            --n_val -1 \
            --val_check_interval 1.0 \
            --train_batch_size 2 \
            --eval_batch_size 1 \
            --max_source_length 25 \
            --max_target_length 25 \
            --val_max_target_length 25 \
            --test_max_target_length 25 \
            --label_smoothing 0.1 \
            --dropout 0.1 \
            --attention_dropout 0.1 \
            --weight_decay 0.001 \
            --adam_epsilon 1e-08 \
            --max_grad_norm 0.1 \
            --lr_scheduler polynomial \
            --learning_rate 3e-04 \
            --num_train_epochs 1 \
            --warmup_steps 4 \
            --gradient_accumulation_steps 1 \
            --distributed-port 8787 \
            --use_dummy_dataset 1 \
            --distributed_retriever {distributed_retriever} \
            """.split()

        if gpus > 0:
            testargs.append(f"--gpus={gpus}")
            if is_apex_available():
                testargs.append("--fp16")
        else:
            testargs.append("--gpus=0")
            testargs.append("--distributed_backend=ddp_cpu")
            testargs.append("--num_processes=2")

        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result

    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
    from .pipeline_shap_e_img2img import ShapEImg2ImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
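
# Usage sketch for the text-to-3D pipeline exported above. The model id and
# parameters follow the diffusers documentation for Shap-E, not this file:
#
#     import torch
#     from diffusers import ShapEPipeline
#
#     pipe = ShapEPipeline.from_pretrained("openai/shap-e", torch_dtype=torch.float16)
#     pipe = pipe.to("cuda")
#     images = pipe(
#         "a shark", guidance_scale=15.0, num_inference_steps=64, frame_size=256
#     ).images  # rendered views of the generated 3D asset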
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}


@lru_cache()
def bytes_to_unicode():
    # Returns a mapping from utf-8 bytes to printable unicode characters, so
    # that byte-level BPE can operate on strings without control characters.
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    # Return the set of adjacent symbol pairs in a word (as a tuple of symbols).
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class BartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
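
# Usage sketch: byte-level BPE in action. `Ġ` in the token strings marks a
# leading space, via the byte-to-unicode mapping built by `bytes_to_unicode`.
# The exact token split depends on the checkpoint's merges file.
#
#     from transformers import BartTokenizer
#
#     tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
#     enc = tokenizer("Hello world!")
#     tokens = tokenizer.convert_ids_to_tokens(enc.input_ids)
#     # e.g. ['<s>', 'Hello', 'Ġworld', '!', '</s>']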
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import VivitImageProcessor


class VivitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_frames=10,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
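
# Usage sketch: preprocessing a video (a list of frames) into the 5D tensor
# the tests above check for, shaped (batch, frames, channels, height, width).
# The checkpoint id, frame count, and sizes here are illustrative placeholders.
#
#     import numpy as np
#     from transformers import VivitImageProcessor
#
#     processor = VivitImageProcessor.from_pretrained("google/vivit-b-16x2-kinetics400")
#     video = [np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8) for _ in range(32)]
#     inputs = processor(video, return_tensors="pt")
#     print(inputs.pixel_values.shape)  # e.g. torch.Size([1, 32, 3, 224, 224])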
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
UpperCAmelCase_ = None
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
UpperCAmelCase_ = {
"""vocab_file""": {
"""facebook/mbart-large-en-ro""": (
"""https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"""
),
"""facebook/mbart-large-cc25""": (
"""https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/mbart-large-en-ro""": """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json""",
"""facebook/mbart-large-cc25""": """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json""",
},
}
UpperCAmelCase_ = {
"""facebook/mbart-large-en-ro""": 1_0_2_4,
"""facebook/mbart-large-cc25""": 1_0_2_4,
}
# fmt: off
UpperCAmelCase_ = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""]
class UpperCAmelCase ( __snake_case ):
SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ = ['input_ids', 'attention_mask']
SCREAMING_SNAKE_CASE__ = MBartTokenizer
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = []
def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase="<s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="<s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase="<mask>" , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , **_lowerCAmelCase , ):
_lowerCAmelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
super().__init__(
vocab_file=A_ , tokenizer_file=A_ , bos_token=A_ , eos_token=A_ , sep_token=A_ , cls_token=A_ , unk_token=A_ , pad_token=A_ , mask_token=A_ , src_lang=A_ , tgt_lang=A_ , additional_special_tokens=A_ , **A_ , )
_lowerCAmelCase = vocab_file
_lowerCAmelCase = False if not self.vocab_file else True
_lowerCAmelCase = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
_lowerCAmelCase = {
lang_code: self.convert_tokens_to_ids(A_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_lowerCAmelCase = src_lang if src_lang is not None else "en_XX"
_lowerCAmelCase = self.convert_tokens_to_ids(self._src_lang )
_lowerCAmelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __lowerCAmelCase ( self ):
return self._src_lang
@src_lang.setter
def __lowerCAmelCase ( self , _lowerCAmelCase ):
_lowerCAmelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
_lowerCAmelCase = [self.sep_token_id]
_lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ):
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
_lowerCAmelCase = src_lang
_lowerCAmelCase = self(A_ , add_special_tokens=A_ , return_tensors=A_ , **A_ )
_lowerCAmelCase = self.convert_tokens_to_ids(A_ )
_lowerCAmelCase = tgt_lang_id
return inputs
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = "en_XX" , _lowerCAmelCase = None , _lowerCAmelCase = "ro_RO" , **_lowerCAmelCase , ):
_lowerCAmelCase = src_lang
_lowerCAmelCase = tgt_lang
return super().prepare_seqaseq_batch(A_ , A_ , **A_ )
def __lowerCAmelCase ( self ):
return self.set_src_lang_special_tokens(self.src_lang )
def __lowerCAmelCase ( self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __lowerCAmelCase ( self , _lowerCAmelCase ):
_lowerCAmelCase = self.convert_tokens_to_ids(A_ )
_lowerCAmelCase = []
_lowerCAmelCase = [self.eos_token_id, self.cur_lang_code]
_lowerCAmelCase = self.convert_ids_to_tokens(self.prefix_tokens )
_lowerCAmelCase = self.convert_ids_to_tokens(self.suffix_tokens )
_lowerCAmelCase = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __lowerCAmelCase ( self , _lowerCAmelCase ):
_lowerCAmelCase = self.convert_tokens_to_ids(A_ )
_lowerCAmelCase = []
_lowerCAmelCase = [self.eos_token_id, self.cur_lang_code]
_lowerCAmelCase = self.convert_ids_to_tokens(self.prefix_tokens )
_lowerCAmelCase = self.convert_ids_to_tokens(self.suffix_tokens )
_lowerCAmelCase = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(A_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory.''' )
return
_lowerCAmelCase = os.path.join(
A_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ):
copyfile(self.vocab_file , A_ )
return (out_vocab_file,)
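# A minimal sketch of the post-processor rebuild the two set-language methods
# above perform: MBart-style fast tokenizers append [eos, lang_code] after
# every sequence by swapping in a fresh TemplateProcessing object. The token
# strings and ids below (2 for "</s>", 250004 for "en_XX") are illustrative
# assumptions, not values read from a real vocabulary file.
example_suffix_tokens = ["</s>", "en_XX"]
example_suffix_ids = [2, 250_004]
example_post_processor = processors.TemplateProcessing(
    single="$A " + " ".join(example_suffix_tokens),
    pair="$A $B " + " ".join(example_suffix_tokens),
    special_tokens=list(zip(example_suffix_tokens, example_suffix_ids)),
)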
| 716 |
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
UpperCAmelCase_ = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
UpperCAmelCase_ = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
UpperCAmelCase_ = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[Any] )->Optional[Any]:
def remove_articles(_SCREAMING_SNAKE_CASE : List[str] ):
_lowerCAmelCase = re.compile(r'''\b(a|an|the)\b''' , re.UNICODE )
return re.sub(_SCREAMING_SNAKE_CASE , ''' ''' , _SCREAMING_SNAKE_CASE )
def white_space_fix(_SCREAMING_SNAKE_CASE : List[Any] ):
return " ".join(text.split() )
def remove_punc(_SCREAMING_SNAKE_CASE : Optional[Any] ):
_lowerCAmelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_SCREAMING_SNAKE_CASE : Optional[int] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_SCREAMING_SNAKE_CASE ) ) ) )
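# e.g. the pipeline above maps "The  Cat, sat!" to "cat sat": lowercase first,
# then strip punctuation, drop the articles "a"/"an"/"the", and collapse
# whitespace.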
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[Any] )->Any:
return int(normalize_answer(_SCREAMING_SNAKE_CASE ) == normalize_answer(_SCREAMING_SNAKE_CASE ) )
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : str )->int:
_lowerCAmelCase = [any(compute_exact(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for ref in refs ) for pred, refs in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )]
return (sum(_SCREAMING_SNAKE_CASE ) / len(_SCREAMING_SNAKE_CASE )) * 1_0_0
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[str] )->Optional[int]:
_lowerCAmelCase = [rgram for rgrams in rgramslist for rgram in rgrams]
_lowerCAmelCase = Counter(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = Counter(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = Counter()
for sgram, scount in sgramcounter.items():
_lowerCAmelCase = scount * numref
_lowerCAmelCase = Counter(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = Counter()
for cgram, ccount in cgramcounter.items():
_lowerCAmelCase = ccount * numref
# KEEP
_lowerCAmelCase = sgramcounter_rep & cgramcounter_rep
_lowerCAmelCase = keepgramcounter_rep & rgramcounter
_lowerCAmelCase = sgramcounter_rep & rgramcounter
_lowerCAmelCase = 0
_lowerCAmelCase = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_lowerCAmelCase = 1
_lowerCAmelCase = 1
if len(_SCREAMING_SNAKE_CASE ) > 0:
_lowerCAmelCase = keeptmpscorea / len(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
_lowerCAmelCase = keeptmpscorea / sum(keepgramcounterall_rep.values() )
_lowerCAmelCase = 0
if keepscore_precision > 0 or keepscore_recall > 0:
_lowerCAmelCase = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
_lowerCAmelCase = sgramcounter_rep - cgramcounter_rep
_lowerCAmelCase = delgramcounter_rep - rgramcounter
_lowerCAmelCase = sgramcounter_rep - rgramcounter
_lowerCAmelCase = 0
_lowerCAmelCase = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_lowerCAmelCase = 1
if len(_SCREAMING_SNAKE_CASE ) > 0:
_lowerCAmelCase = deltmpscorea / len(_SCREAMING_SNAKE_CASE )
# ADDITION
_lowerCAmelCase = set(_SCREAMING_SNAKE_CASE ) - set(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = set(_SCREAMING_SNAKE_CASE ) & set(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = set(_SCREAMING_SNAKE_CASE ) - set(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_lowerCAmelCase = 1
_lowerCAmelCase = 1
if len(_SCREAMING_SNAKE_CASE ) > 0:
_lowerCAmelCase = addtmpscore / len(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
_lowerCAmelCase = addtmpscore / len(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = 0
if addscore_precision > 0 or addscore_recall > 0:
_lowerCAmelCase = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : str )->List[Any]:
_lowerCAmelCase = len(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = ssent.split(''' ''' )
_lowerCAmelCase = csent.split(''' ''' )
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
for rsent in rsents:
_lowerCAmelCase = rsent.split(''' ''' )
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
ragramslist.append(_SCREAMING_SNAKE_CASE )
for i in range(0 , len(_SCREAMING_SNAKE_CASE ) - 1 ):
if i < len(_SCREAMING_SNAKE_CASE ) - 1:
_lowerCAmelCase = ragrams[i] + ''' ''' + ragrams[i + 1]
ragrams.append(_SCREAMING_SNAKE_CASE )
if i < len(_SCREAMING_SNAKE_CASE ) - 2:
_lowerCAmelCase = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2]
ragrams.append(_SCREAMING_SNAKE_CASE )
if i < len(_SCREAMING_SNAKE_CASE ) - 3:
_lowerCAmelCase = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] + ''' ''' + ragrams[i + 3]
ragrams.append(_SCREAMING_SNAKE_CASE )
ragramslist.append(_SCREAMING_SNAKE_CASE )
ragramslist.append(_SCREAMING_SNAKE_CASE )
ragramslist.append(_SCREAMING_SNAKE_CASE )
for i in range(0 , len(_SCREAMING_SNAKE_CASE ) - 1 ):
if i < len(_SCREAMING_SNAKE_CASE ) - 1:
_lowerCAmelCase = sagrams[i] + ''' ''' + sagrams[i + 1]
sagrams.append(_SCREAMING_SNAKE_CASE )
if i < len(_SCREAMING_SNAKE_CASE ) - 2:
_lowerCAmelCase = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2]
sagrams.append(_SCREAMING_SNAKE_CASE )
if i < len(_SCREAMING_SNAKE_CASE ) - 3:
_lowerCAmelCase = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] + ''' ''' + sagrams[i + 3]
sagrams.append(_SCREAMING_SNAKE_CASE )
for i in range(0 , len(_SCREAMING_SNAKE_CASE ) - 1 ):
if i < len(_SCREAMING_SNAKE_CASE ) - 1:
_lowerCAmelCase = cagrams[i] + ''' ''' + cagrams[i + 1]
cagrams.append(_SCREAMING_SNAKE_CASE )
if i < len(_SCREAMING_SNAKE_CASE ) - 2:
_lowerCAmelCase = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2]
cagrams.append(_SCREAMING_SNAKE_CASE )
if i < len(_SCREAMING_SNAKE_CASE ) - 3:
_lowerCAmelCase = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] + ''' ''' + cagrams[i + 3]
cagrams.append(_SCREAMING_SNAKE_CASE )
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = SARIngram(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = SARIngram(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = SARIngram(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = SARIngram(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_lowerCAmelCase = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
_lowerCAmelCase = sum([delascore, delascore, delascore, delascore] ) / 4
_lowerCAmelCase = sum([addascore, addascore, addascore, addascore] ) / 4
_lowerCAmelCase = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
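# A compact, de-obfuscated sketch of the 1-to-4-gram construction that the
# three near-identical loops above implement (assumes whitespace-tokenized
# input; this is an illustration, not the exact upstream helper):
def build_ngrams(tokens, max_n=4):
    grams = {n: [] for n in range(1, max_n + 1)}
    for n in range(1, max_n + 1):
        for i in range(len(tokens) - n + 1):
            grams[n].append(" ".join(tokens[i : i + n]))
    return grams
# build_ngrams("about 95 species".split()) returns
# {1: ["about", "95", "species"], 2: ["about 95", "95 species"],
#  3: ["about 95 species"], 4: []}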
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : bool = True , _SCREAMING_SNAKE_CASE : str = "13a" , _SCREAMING_SNAKE_CASE : bool = True )->int:
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though the Wiki-Auto and TURK datasets
    # do not require normalization, we do it for consistency.
# Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
# [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
if lowercase:
_lowerCAmelCase = sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
_lowerCAmelCase = sacrebleu.metrics.bleu._get_tokenizer(_SCREAMING_SNAKE_CASE )()(_SCREAMING_SNAKE_CASE )
else:
_lowerCAmelCase = sacrebleu.TOKENIZERS[tokenizer]()(_SCREAMING_SNAKE_CASE )
elif tokenizer == "moses":
_lowerCAmelCase = sacremoses.MosesTokenizer().tokenize(_SCREAMING_SNAKE_CASE , return_str=_SCREAMING_SNAKE_CASE , escape=_SCREAMING_SNAKE_CASE )
elif tokenizer == "penn":
_lowerCAmelCase = sacremoses.MosesTokenizer().penn_tokenize(_SCREAMING_SNAKE_CASE , return_str=_SCREAMING_SNAKE_CASE )
else:
_lowerCAmelCase = sentence
if not return_str:
_lowerCAmelCase = normalized_sent.split()
return normalized_sent
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[str] )->str:
if not (len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE )):
raise ValueError('''Sources length must match predictions and references lengths.''' )
_lowerCAmelCase = 0
for src, pred, refs in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
sari_score += SARIsent(normalize(_SCREAMING_SNAKE_CASE ) , normalize(_SCREAMING_SNAKE_CASE ) , [normalize(_SCREAMING_SNAKE_CASE ) for sent in refs] )
_lowerCAmelCase = sari_score / len(_SCREAMING_SNAKE_CASE )
return 1_0_0 * sari_score
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Optional[Any]="exp" , _SCREAMING_SNAKE_CASE : Optional[int]=None , _SCREAMING_SNAKE_CASE : Optional[int]=False , _SCREAMING_SNAKE_CASE : str=False , _SCREAMING_SNAKE_CASE : int=False , )->str:
_lowerCAmelCase = len(references[0] )
if any(len(_SCREAMING_SNAKE_CASE ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
_lowerCAmelCase = [[refs[i] for refs in references] for i in range(_SCREAMING_SNAKE_CASE )]
_lowerCAmelCase = sacrebleu.corpus_bleu(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , smooth_method=_SCREAMING_SNAKE_CASE , smooth_value=_SCREAMING_SNAKE_CASE , force=_SCREAMING_SNAKE_CASE , lowercase=_SCREAMING_SNAKE_CASE , use_effective_order=_SCREAMING_SNAKE_CASE , )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class UpperCAmelCase ( datasets.Metric ):
def __lowerCAmelCase ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=[
'''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
'''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
'''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
'''https://github.com/mjpost/sacreBLEU''',
] , reference_urls=[
'''https://www.aclweb.org/anthology/Q16-1029.pdf''',
'''https://github.com/mjpost/sacreBLEU''',
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = {}
result.update({'''sari''': compute_sari(sources=_lowerCAmelCase , predictions=_lowerCAmelCase , references=_lowerCAmelCase )} )
result.update({'''sacrebleu''': compute_sacrebleu(predictions=_lowerCAmelCase , references=_lowerCAmelCase )} )
result.update({'''exact''': compute_em(predictions=_lowerCAmelCase , references=_lowerCAmelCase )} )
return result
| 664 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
UpperCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCAmelCase ( UpperCamelCase__ ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase ):
super().__init__()
self.register_modules(unet=_lowerCAmelCase , scheduler=_lowerCAmelCase )
@torch.no_grad()
def __call__( self , _lowerCAmelCase = 1 , _lowerCAmelCase = 100 , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = True , ):
if audio_length_in_s is None:
_lowerCAmelCase = self.unet.config.sample_size / self.unet.config.sample_rate
_lowerCAmelCase = audio_length_in_s * self.unet.config.sample_rate
_lowerCAmelCase = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
                F'''{audio_length_in_s} is too small. Make sure it\'s bigger than or equal to'''
                F''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )
_lowerCAmelCase = int(_lowerCAmelCase )
if sample_size % down_scale_factor != 0:
_lowerCAmelCase = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
F'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
F''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
''' process.''' )
_lowerCAmelCase = int(_lowerCAmelCase )
_lowerCAmelCase = next(iter(self.unet.parameters() ) ).dtype
_lowerCAmelCase = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(_lowerCAmelCase , _lowerCAmelCase ) and len(_lowerCAmelCase ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(_lowerCAmelCase )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
_lowerCAmelCase = randn_tensor(_lowerCAmelCase , generator=_lowerCAmelCase , device=self.device , dtype=_lowerCAmelCase )
# set step values
self.scheduler.set_timesteps(_lowerCAmelCase , device=audio.device )
_lowerCAmelCase = self.scheduler.timesteps.to(_lowerCAmelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
_lowerCAmelCase = self.unet(_lowerCAmelCase , _lowerCAmelCase ).sample
            # 2. compute the previous sample: x_t -> x_t-1
_lowerCAmelCase = self.scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).prev_sample
_lowerCAmelCase = audio.clamp(-1 , 1 ).float().cpu().numpy()
_lowerCAmelCase = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=_lowerCAmelCase )
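# The length-rounding rule above in isolation: the sample count must divide
# evenly by 2 ** len(up_blocks), so requested lengths are rounded up to the
# next multiple. The rate and factor below are illustrative assumptions.
example_rate = 16_000
example_factor = 2 ** 3  # e.g. a UNet with three upsampling blocks
example_request = int(2.3 * example_rate)
example_padded = (example_request // example_factor + 1) * example_factor
assert example_padded % example_factor == 0 and example_padded >= example_request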
| 717 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase_ = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["DeiTFeatureExtractor"]
UpperCAmelCase_ = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DeiTForImageClassification",
"DeiTForImageClassificationWithTeacher",
"DeiTForMaskedImageModeling",
"DeiTModel",
"DeiTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDeiTForImageClassification",
"TFDeiTForImageClassificationWithTeacher",
"TFDeiTForMaskedImageModeling",
"TFDeiTModel",
"TFDeiTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
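# The lazy-import pattern above in miniature: the package records an import
# map at import time and only loads a submodule when one of its attributes is
# first touched. This is a simplified stand-in, not the actual transformers
# _LazyModule implementation.
import importlib
import types

class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(submodule, attr)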
| 664 | 0 |
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] )->Optional[int]:
if is_torch_version('''<''' , '''2.0.0''' ) or not hasattr(__lowerCAmelCase , '''_dynamo''' ):
return False
return isinstance(__lowerCAmelCase , torch._dynamo.eval_frame.OptimizedModule )
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : int = True )->List[str]:
_lowerCAmelCase = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
_lowerCAmelCase = is_compiled_module(__lowerCAmelCase )
if is_compiled:
_lowerCAmelCase = model
_lowerCAmelCase = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_lowerCAmelCase = model.module
if not keep_fpaa_wrapper:
_lowerCAmelCase = getattr(__lowerCAmelCase , '''forward''' )
_lowerCAmelCase = model.__dict__.pop('''_original_forward''' , __lowerCAmelCase )
if original_forward is not None:
while hasattr(__lowerCAmelCase , '''__wrapped__''' ):
_lowerCAmelCase = forward.__wrapped__
if forward == original_forward:
break
_lowerCAmelCase = forward
if getattr(__lowerCAmelCase , '''_converted_to_transformer_engine''' , __lowerCAmelCase ):
convert_model(__lowerCAmelCase , to_transformer_engine=__lowerCAmelCase )
if is_compiled:
_lowerCAmelCase = model
_lowerCAmelCase = compiled_model
return model
def UpperCAmelCase__ ( )->Dict:
PartialState().wait_for_everyone()
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[int] )->str:
if PartialState().distributed_type == DistributedType.TPU:
xm.save(__lowerCAmelCase , __lowerCAmelCase )
elif PartialState().local_process_index == 0:
torch.save(__lowerCAmelCase , __lowerCAmelCase )
@contextmanager
def UpperCAmelCase__ ( **_SCREAMING_SNAKE_CASE : List[Any] )->Optional[Any]:
for key, value in kwargs.items():
_lowerCAmelCase = str(__lowerCAmelCase )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[int] )->str:
if not hasattr(__lowerCAmelCase , '''__qualname__''' ) and not hasattr(__lowerCAmelCase , '''__name__''' ):
_lowerCAmelCase = getattr(__lowerCAmelCase , '''__class__''' , __lowerCAmelCase )
if hasattr(__lowerCAmelCase , '''__qualname__''' ):
return obj.__qualname__
if hasattr(__lowerCAmelCase , '''__name__''' ):
return obj.__name__
return str(__lowerCAmelCase )
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : List[str] )->Dict:
for key, value in source.items():
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_lowerCAmelCase = destination.setdefault(__lowerCAmelCase , {} )
merge_dicts(__lowerCAmelCase , __lowerCAmelCase )
else:
_lowerCAmelCase = value
return destination
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict = None )->List[str]:
if port is None:
_lowerCAmelCase = 2_9_5_0_0
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('''localhost''', port) ) == 0
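# Behavior notes for two helpers above (comments only, nothing executed):
# - merge_dicts(source, destination) recursively folds `source` into
#   `destination`: nested dicts are merged key by key, scalar values from
#   `source` overwrite, and the updated `destination` is returned.
# - the port probe treats a successful connect_ex (return code 0) as "port in
#   use", i.e. it returns True when something is already listening; 29500 is
#   the default torch distributed MASTER_PORT.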
| 718 |
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] )->Any: # noqa: E741
_lowerCAmelCase = len(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = 0
_lowerCAmelCase = [0] * n
_lowerCAmelCase = [False] * n
_lowerCAmelCase = [False] * n
def dfs(_SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : int ):
if parent == root:
out_edge_count += 1
_lowerCAmelCase = True
_lowerCAmelCase = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
_lowerCAmelCase = dfs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_lowerCAmelCase = min(low[at] , low[to] )
# AP found via bridge
if at < low[to]:
_lowerCAmelCase = True
# AP found via cycle
if at == low[to]:
_lowerCAmelCase = True
else:
_lowerCAmelCase = min(low[at] , _SCREAMING_SNAKE_CASE )
return out_edge_count
for i in range(_SCREAMING_SNAKE_CASE ):
if not visited[i]:
_lowerCAmelCase = 0
_lowerCAmelCase = dfs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , -1 , _SCREAMING_SNAKE_CASE )
_lowerCAmelCase = out_edge_count > 1
for x in range(len(_SCREAMING_SNAKE_CASE ) ):
if is_art[x] is True:
print(_SCREAMING_SNAKE_CASE )
# Adjacency list of graph
UpperCAmelCase_ = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
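# A de-obfuscated sketch of the rule the DFS above encodes (Hopcroft-Tarjan
# low-link test): a non-root vertex v is an articulation point if some DFS
# child c has low[c] >= disc[v]; the root is one iff it has more than one DFS
# child. Minimal runnable version (assumes an undirected simple graph keyed
# 0..n-1 like `data` above):
def articulation_points(graph):
    n = len(graph)
    disc, low = [0] * n, [0] * n
    visited, is_art = [False] * n, [False] * n
    def dfs(at, parent):
        children = 0
        visited[at] = True
        disc[at] = low[at] = dfs.timer = dfs.timer + 1
        for to in graph[at]:
            if to == parent:
                continue
            if visited[to]:
                low[at] = min(low[at], disc[to])  # back edge
            else:
                children += 1
                dfs(to, at)
                low[at] = min(low[at], low[to])
                if parent != -1 and low[to] >= disc[at]:
                    is_art[at] = True
        if parent == -1 and children > 1:
            is_art[at] = True
    dfs.timer = 0
    for v in range(n):
        if not visited[v]:
            dfs(v, -1)
    return [v for v in range(n) if is_art[v]]

# articulation_points(data) evaluates to [2, 3, 5] for the graph above.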
| 664 | 0 |
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Dict )->Dict:
return int((input_a, input_a).count(1 ) != 0 )
def UpperCAmelCase__ ( )->List[Any]:
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
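# An equivalent, arguably clearer formulation of the gate above for 0/1
# inputs, generalized to any arity:
def or_gate_any(*bits: int) -> int:
    return int(any(bits))

assert or_gate_any(0, 0) == 0 and or_gate_any(0, 1, 0) == 1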
| 719 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class UpperCAmelCase ( snake_case_ ):
def __lowerCAmelCase ( self ):
_lowerCAmelCase = SMALL_MODEL_IDENTIFIER
_lowerCAmelCase = '''pt'''
_lowerCAmelCase = '''tf'''
def __lowerCAmelCase ( self , _lowerCAmelCase ):
_lowerCAmelCase = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(_lowerCAmelCase )
def __lowerCAmelCase ( self , _lowerCAmelCase ):
_lowerCAmelCase = TFAutoModel.from_pretrained(self.test_model , from_pt=_lowerCAmelCase )
model_tf.save_pretrained(_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = '''mock_framework'''
# Framework provided - return whatever the user provides
_lowerCAmelCase = FeaturesManager.determine_framework(self.test_model , _lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_lowerCAmelCase )
_lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_lowerCAmelCase )
_lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
def __lowerCAmelCase ( self ):
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_lowerCAmelCase )
_lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_lowerCAmelCase )
_lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(_lowerCAmelCase ):
_lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase )
def __lowerCAmelCase ( self ):
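        # TensorFlow not in environment -> use PyTorch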
_lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase )
with patch('''transformers.onnx.features.is_tf_available''' , _lowerCAmelCase ):
_lowerCAmelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowerCAmelCase , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
_lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase )
with patch('''transformers.onnx.features.is_torch_available''' , _lowerCAmelCase ):
_lowerCAmelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowerCAmelCase , self.framework_tf )
# Both in environment -> use PyTorch
_lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase )
_lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase )
with patch('''transformers.onnx.features.is_tf_available''' , _lowerCAmelCase ), patch(
'''transformers.onnx.features.is_torch_available''' , _lowerCAmelCase ):
_lowerCAmelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowerCAmelCase , self.framework_pt )
# Both not in environment -> raise error
_lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase )
_lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase )
with patch('''transformers.onnx.features.is_tf_available''' , _lowerCAmelCase ), patch(
'''transformers.onnx.features.is_torch_available''' , _lowerCAmelCase ):
with self.assertRaises(_lowerCAmelCase ):
_lowerCAmelCase = FeaturesManager.determine_framework(self.test_model )
| 664 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase ( __lowercase ,__lowercase ,unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = StableDiffusionXLImgaImgPipeline
SCREAMING_SNAKE_CASE__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
SCREAMING_SNAKE_CASE__ = PipelineTesterMixin.required_optional_params - {'''latents'''}
SCREAMING_SNAKE_CASE__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
SCREAMING_SNAKE_CASE__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __lowerCAmelCase ( self ):
torch.manual_seed(0 )
_lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=__a , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
_lowerCAmelCase = EulerDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , )
torch.manual_seed(0 )
_lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
_lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='''gelu''' , projection_dim=32 , )
_lowerCAmelCase = CLIPTextModel(__a )
_lowerCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=__a )
_lowerCAmelCase = CLIPTextModelWithProjection(__a )
_lowerCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=__a )
_lowerCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""text_encoder_2""": text_encoder_a,
"""tokenizer_2""": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase=0 ):
_lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a )
_lowerCAmelCase = image / 2 + 0.5
if str(__a ).startswith('''mps''' ):
_lowerCAmelCase = torch.manual_seed(__a )
else:
_lowerCAmelCase = torch.Generator(device=__a ).manual_seed(__a )
_lowerCAmelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 5.0,
"""output_type""": """numpy""",
"""strength""": 0.75,
}
return inputs
def __lowerCAmelCase ( self ):
_lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = StableDiffusionXLImgaImgPipeline(**__a )
_lowerCAmelCase = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
_lowerCAmelCase = self.get_dummy_inputs(__a )
_lowerCAmelCase = sd_pipe(**__a ).images
_lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_lowerCAmelCase = np.array([0.4_656, 0.4_840, 0.4_439, 0.6_698, 0.5_574, 0.4_524, 0.5_799, 0.5_943, 0.5_165] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCAmelCase ( self ):
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def __lowerCAmelCase ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def __lowerCAmelCase ( self ):
pass
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = StableDiffusionXLImgaImgPipeline(**__a )
_lowerCAmelCase = sd_pipe.to(__a )
_lowerCAmelCase = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
# forward without prompt embeds
_lowerCAmelCase = self.get_dummy_inputs(__a )
_lowerCAmelCase = 3 * ["""this is a negative prompt"""]
_lowerCAmelCase = negative_prompt
_lowerCAmelCase = 3 * [inputs["""prompt"""]]
_lowerCAmelCase = sd_pipe(**__a )
_lowerCAmelCase = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
_lowerCAmelCase = self.get_dummy_inputs(__a )
_lowerCAmelCase = 3 * ["""this is a negative prompt"""]
_lowerCAmelCase = 3 * [inputs.pop('''prompt''' )]
(
_lowerCAmelCase
) = sd_pipe.encode_prompt(__a , negative_prompt=__a )
_lowerCAmelCase = sd_pipe(
**__a , prompt_embeds=__a , negative_prompt_embeds=__a , pooled_prompt_embeds=__a , negative_pooled_prompt_embeds=__a , )
_lowerCAmelCase = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
def __lowerCAmelCase ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase="cpu" , _lowerCAmelCase=torch.floataa , _lowerCAmelCase=0 ):
_lowerCAmelCase = torch.Generator(device=__a ).manual_seed(__a )
_lowerCAmelCase = np.random.RandomState(__a ).standard_normal((1, 4, 64, 64) )
_lowerCAmelCase = torch.from_numpy(__a ).to(device=__a , dtype=__a )
_lowerCAmelCase = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def __lowerCAmelCase ( self ):
_lowerCAmelCase = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''' )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
_lowerCAmelCase = self.get_inputs(__a )
_lowerCAmelCase = pipe(**__a ).images
_lowerCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase = np.array([0.49_493, 0.47_896, 0.40_798, 0.54_214, 0.53_212, 0.48_202, 0.47_656, 0.46_329, 0.48_506] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
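# The slice-comparison pattern used throughout these pipeline tests, shown in
# isolation with stand-in data (the zeros below are placeholders, not real
# frozen reference values):
import numpy as np

fake_image = np.zeros((1, 512, 512, 3))  # stands in for pipe(...).images
fake_slice = fake_image[0, -3:, -3:, -1].flatten()  # 3x3 corner, last channel
fake_expected = np.zeros(9)
assert np.abs(fake_slice - fake_expected).max() < 7E-3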
| 720 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase ( snake_case_ ,unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = DiTPipeline
SCREAMING_SNAKE_CASE__ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
SCREAMING_SNAKE_CASE__ = PipelineTesterMixin.required_optional_params - {
'''latents''',
'''num_images_per_prompt''',
'''callback''',
'''callback_steps''',
}
SCREAMING_SNAKE_CASE__ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
SCREAMING_SNAKE_CASE__ = False
def __lowerCAmelCase ( self ):
torch.manual_seed(0 )
_lowerCAmelCase = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_lowerCAmelCase , activation_fn='''gelu-approximate''' , num_embeds_ada_norm=1_000 , norm_type='''ada_norm_zero''' , norm_elementwise_affine=_lowerCAmelCase , )
_lowerCAmelCase = AutoencoderKL()
_lowerCAmelCase = DDIMScheduler()
_lowerCAmelCase = {'''transformer''': transformer.eval(), '''vae''': vae.eval(), '''scheduler''': scheduler}
return components
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase=0 ):
if str(_lowerCAmelCase ).startswith('''mps''' ):
_lowerCAmelCase = torch.manual_seed(_lowerCAmelCase )
else:
_lowerCAmelCase = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
_lowerCAmelCase = {
'''class_labels''': [1],
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def __lowerCAmelCase ( self ):
_lowerCAmelCase = '''cpu'''
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = self.pipeline_class(**_lowerCAmelCase )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowerCAmelCase = self.get_dummy_inputs(_lowerCAmelCase )
_lowerCAmelCase = pipe(**_lowerCAmelCase ).images
_lowerCAmelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_lowerCAmelCase = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] )
_lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_lowerCAmelCase , 1E-3 )
def __lowerCAmelCase ( self ):
self._test_inference_batch_single_identical(relax_max_difference=_lowerCAmelCase , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __lowerCAmelCase ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class UpperCAmelCase ( unittest.TestCase ):
def __lowerCAmelCase ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ):
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-256''' )
pipe.to('''cuda''' )
_lowerCAmelCase = ['''vase''', '''umbrella''', '''white shark''', '''white wolf''']
_lowerCAmelCase = pipe.get_label_ids(_lowerCAmelCase )
_lowerCAmelCase = pipe(_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=40 , output_type='''np''' ).images
for word, image in zip(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = load_numpy(
F'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
assert np.abs((expected_image - image).max() ) < 1E-2
def __lowerCAmelCase ( self ):
_lowerCAmelCase = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-512''' )
_lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('''cuda''' )
_lowerCAmelCase = ['''vase''', '''umbrella''']
_lowerCAmelCase = pipe.get_label_ids(_lowerCAmelCase )
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = pipe(_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=25 , output_type='''np''' ).images
for word, image in zip(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
F'''/dit/{word}_512.npy''' )
assert np.abs((expected_image - image).max() ) < 1E-1
| 664 | 0 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class UpperCAmelCase ( unittest.TestCase ):
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = jnp.ones((batch_size, length) ) / length
return scores
def __lowerCAmelCase ( self ):
_lowerCAmelCase = None
_lowerCAmelCase = 20
_lowerCAmelCase = self._get_uniform_logits(batch_size=2 , length=UpperCAmelCase__ )
# tweak scores to not be uniform anymore
_lowerCAmelCase = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
_lowerCAmelCase = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
_lowerCAmelCase = jax.nn.softmax(UpperCAmelCase__ , axis=-1 )
_lowerCAmelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
_lowerCAmelCase = FlaxTemperatureLogitsWarper(temperature=1.3 )
_lowerCAmelCase = jax.nn.softmax(temp_dist_warper_sharper(UpperCAmelCase__ , scores.copy() , cur_len=UpperCAmelCase__ ) , axis=-1 )
_lowerCAmelCase = jax.nn.softmax(temp_dist_warper_smoother(UpperCAmelCase__ , scores.copy() , cur_len=UpperCAmelCase__ ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = None
_lowerCAmelCase = 10
_lowerCAmelCase = 2
# create ramp distribution
_lowerCAmelCase = np.broadcast_to(np.arange(UpperCAmelCase__ )[None, :] , (batch_size, vocab_size) ).copy()
_lowerCAmelCase = ramp_logits[1:, : vocab_size // 2] + vocab_size
_lowerCAmelCase = FlaxTopKLogitsWarper(3 )
_lowerCAmelCase = top_k_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
_lowerCAmelCase = 5
_lowerCAmelCase = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
_lowerCAmelCase = np.broadcast_to(np.arange(UpperCAmelCase__ )[None, :] , (batch_size, length) ).copy()
_lowerCAmelCase = top_k_warp_safety_check(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = None
_lowerCAmelCase = 10
_lowerCAmelCase = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
_lowerCAmelCase = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
_lowerCAmelCase = FlaxTopPLogitsWarper(0.8 )
_lowerCAmelCase = np.exp(top_p_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
_lowerCAmelCase = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
# check edge cases with negative and extreme logits
_lowerCAmelCase = np.broadcast_to(np.arange(UpperCAmelCase__ )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
_lowerCAmelCase = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
_lowerCAmelCase = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
_lowerCAmelCase = top_p_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = 20
_lowerCAmelCase = 4
_lowerCAmelCase = 0
_lowerCAmelCase = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase__ )
# check that min length is applied at length 5
_lowerCAmelCase = ids_tensor((batch_size, 20) , vocab_size=20 )
_lowerCAmelCase = 5
_lowerCAmelCase = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__ )
_lowerCAmelCase = min_dist_processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
_lowerCAmelCase = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__ )
_lowerCAmelCase = 15
_lowerCAmelCase = min_dist_processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
self.assertFalse(jnp.isinf(UpperCAmelCase__ ).any() )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = 20
_lowerCAmelCase = 4
_lowerCAmelCase = 0
_lowerCAmelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase__ )
# check that all scores are -inf except the bos_token_id score
_lowerCAmelCase = ids_tensor((batch_size, 1) , vocab_size=20 )
_lowerCAmelCase = 1
_lowerCAmelCase = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__ )
_lowerCAmelCase = logits_processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
_lowerCAmelCase = 3
_lowerCAmelCase = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__ )
_lowerCAmelCase = logits_processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
self.assertFalse(jnp.isinf(UpperCAmelCase__ ).any() )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = 20
_lowerCAmelCase = 4
_lowerCAmelCase = 0
_lowerCAmelCase = 5
_lowerCAmelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ )
# check that all scores are -inf except the eos_token_id when max_length is reached
_lowerCAmelCase = ids_tensor((batch_size, 4) , vocab_size=20 )
_lowerCAmelCase = 4
_lowerCAmelCase = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__ )
_lowerCAmelCase = logits_processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
_lowerCAmelCase = 3
_lowerCAmelCase = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__ )
_lowerCAmelCase = logits_processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
self.assertFalse(jnp.isinf(UpperCAmelCase__ ).any() )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = 4
_lowerCAmelCase = 10
_lowerCAmelCase = 15
_lowerCAmelCase = 2
_lowerCAmelCase = 1
_lowerCAmelCase = 15
# dummy input_ids and scores
_lowerCAmelCase = ids_tensor((batch_size, sequence_length) , UpperCAmelCase__ )
_lowerCAmelCase = input_ids.copy()
_lowerCAmelCase = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__ )
_lowerCAmelCase = scores.copy()
# instantiate all dist processors
_lowerCAmelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
_lowerCAmelCase = FlaxTopKLogitsWarper(3 )
_lowerCAmelCase = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_lowerCAmelCase = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase__ )
_lowerCAmelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase__ )
_lowerCAmelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ )
_lowerCAmelCase = 10
# no processor list
_lowerCAmelCase = temp_dist_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
_lowerCAmelCase = top_k_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
_lowerCAmelCase = top_p_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
_lowerCAmelCase = min_dist_proc(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
_lowerCAmelCase = bos_dist_proc(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
_lowerCAmelCase = eos_dist_proc(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
# with processor list
_lowerCAmelCase = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_lowerCAmelCase = processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = 4
_lowerCAmelCase = 10
_lowerCAmelCase = 15
_lowerCAmelCase = 2
_lowerCAmelCase = 1
_lowerCAmelCase = 15
# dummy input_ids and scores
_lowerCAmelCase = ids_tensor((batch_size, sequence_length) , UpperCAmelCase__ )
_lowerCAmelCase = input_ids.copy()
_lowerCAmelCase = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__ )
_lowerCAmelCase = scores.copy()
# instantiate all dist processors
_lowerCAmelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
_lowerCAmelCase = FlaxTopKLogitsWarper(3 )
_lowerCAmelCase = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_lowerCAmelCase = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase__ )
_lowerCAmelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase__ )
_lowerCAmelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ )
_lowerCAmelCase = 10
# no processor list
def run_no_processor_list(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = temp_dist_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
_lowerCAmelCase = top_k_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
_lowerCAmelCase = top_p_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
_lowerCAmelCase = min_dist_proc(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
_lowerCAmelCase = bos_dist_proc(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
_lowerCAmelCase = eos_dist_proc(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
return scores
# with processor list
def run_processor_list(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_lowerCAmelCase = processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
return scores
_lowerCAmelCase = jax.jit(UpperCAmelCase__ )
_lowerCAmelCase = jax.jit(UpperCAmelCase__ )
_lowerCAmelCase = jitted_run_no_processor_list(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
_lowerCAmelCase = jitted_run_processor_list(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
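# A numpy-only sketch of the temperature warper exercised above: dividing the
# logits by T < 1 sharpens the softmax peak while T > 1 flattens it; this
# runs without flax installed.
import numpy as np

def example_softmax(x):
    e = np.exp(x - x.max())
    return e / e.sum()

example_logits = np.array([2.0, 1.0, 0.0])
assert (
    example_softmax(example_logits / 0.5).max()
    > example_softmax(example_logits).max()
    > example_softmax(example_logits / 1.3).max()
)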
| 721 |
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
UpperCAmelCase_ = {"UserAgent": UserAgent().random}
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict )->dict:
_lowerCAmelCase = script.contents[0]
_lowerCAmelCase = json.loads(data[data.find('''{"config"''' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class UpperCAmelCase :
def __init__( self , _lowerCAmelCase ):
_lowerCAmelCase = F'''https://www.instagram.com/{username}/'''
_lowerCAmelCase = self.get_json()
def __lowerCAmelCase ( self ):
_lowerCAmelCase = requests.get(self.url , headers=_lowerCAmelCase ).text
_lowerCAmelCase = BeautifulSoup(_lowerCAmelCase , '''html.parser''' ).find_all('''script''' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self ):
return F'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self ):
return F'''{self.fullname} ({self.username}) is {self.biography}'''
@property
def __lowerCAmelCase ( self ):
return self.user_data["username"]
@property
def __lowerCAmelCase ( self ):
return self.user_data["full_name"]
@property
def __lowerCAmelCase ( self ):
return self.user_data["biography"]
@property
def __lowerCAmelCase ( self ):
return self.user_data["business_email"]
@property
def __lowerCAmelCase ( self ):
return self.user_data["external_url"]
@property
def __lowerCAmelCase ( self ):
return self.user_data["edge_followed_by"]["count"]
@property
def __lowerCAmelCase ( self ):
return self.user_data["edge_follow"]["count"]
@property
def __lowerCAmelCase ( self ):
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def __lowerCAmelCase ( self ):
return self.user_data["profile_pic_url_hd"]
@property
def __lowerCAmelCase ( self ):
return self.user_data["is_verified"]
@property
def __lowerCAmelCase ( self ):
return self.user_data["is_private"]
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str = "github" )->None:
import os
if os.environ.get('''CI''' ):
return # test failing on GitHub Actions
_lowerCAmelCase = InstagramUser(_SCREAMING_SNAKE_CASE )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , dict )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_5_0
assert instagram_user.number_of_followers > 1_2_0_0_0_0
assert instagram_user.number_of_followings > 1_5
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('''https://instagram.''' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ = InstagramUser("github")
print(instagram_user)
print(F"""{instagram_user.number_of_posts = }""")
print(F"""{instagram_user.number_of_followers = }""")
print(F"""{instagram_user.number_of_followings = }""")
print(F"""{instagram_user.email = }""")
print(F"""{instagram_user.website = }""")
print(F"""{instagram_user.profile_picture_url = }""")
print(F"""{instagram_user.is_verified = }""")
print(F"""{instagram_user.is_private = }""")
| 664 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCAmelCase_ = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MraForMaskedLM",
"MraForMultipleChoice",
"MraForQuestionAnswering",
"MraForSequenceClassification",
"MraForTokenClassification",
"MraLayer",
"MraModel",
"MraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 700 |
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : str )->list[int]:
_lowerCAmelCase = int(_SCREAMING_SNAKE_CASE )
# Initialize Result
_lowerCAmelCase = []
# Traverse through all denomination
for denomination in reversed(_SCREAMING_SNAKE_CASE ):
# Find denominations
while int(_SCREAMING_SNAKE_CASE ) >= int(_SCREAMING_SNAKE_CASE ):
total_value -= int(_SCREAMING_SNAKE_CASE )
answer.append(_SCREAMING_SNAKE_CASE ) # Append the "answers" array
return answer
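# Worked example (sketch) of the greedy loop above: with denominations
# [1, 2, 5, 10, 20, 50, 100, 500, 2000] and value 987, traversing from the
# largest denomination yields 500, four 100s, then 50, 20, 10, 5 and 2:
#   987 = 500 + 100 + 100 + 100 + 100 + 50 + 20 + 10 + 5 + 2
# so the returned list is [500, 100, 100, 100, 100, 50, 20, 10, 5, 2].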
# Driver Code
if __name__ == "__main__":
UpperCAmelCase_ = []
UpperCAmelCase_ = "0"
if (
input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
== "y"
):
UpperCAmelCase_ = int(input("Enter the number of denominations you want to add: ").strip())
for i in range(0, n):
denominations.append(int(input(F"""Denomination {i}: """).strip()))
UpperCAmelCase_ = input("Enter the change you want to make in Indian Currency: ").strip()
else:
# All denominations of Indian Currency if user does not enter
UpperCAmelCase_ = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0]
UpperCAmelCase_ = input("Enter the change you want to make: ").strip()
if int(value) == 0 or int(value) < 0:
print("The total value cannot be zero or negative.")
else:
print(F"""Following is minimal change for {value}: """)
UpperCAmelCase_ = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=" ")
| 664 | 0 |
from __future__ import annotations
import time
import numpy as np
UpperCAmelCase_ = [8, 5, 9, 7]
UpperCAmelCase_ = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
UpperCAmelCase_ = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class UpperCAmelCase :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
_lowerCAmelCase = claim_vector
_lowerCAmelCase = allocated_resources_table
_lowerCAmelCase = maximum_claim_table
def __lowerCAmelCase ( self ):
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def __lowerCAmelCase ( self ):
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def __lowerCAmelCase ( self ):
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(_lowerCAmelCase ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def __lowerCAmelCase ( self ):
return {self.__need().index(i ): i for i in self.__need()}
def __lowerCAmelCase ( self , **_lowerCAmelCase ):
_lowerCAmelCase = self.__need()
_lowerCAmelCase = self.__allocated_resources_table
_lowerCAmelCase = self.__available_resources()
_lowerCAmelCase = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print('''_''' * 50 + '''\n''' )
while need_list:
_lowerCAmelCase = False
for each_need in need_list:
_lowerCAmelCase = True
for index, need in enumerate(_lowerCAmelCase ):
if need > available_resources[index]:
_lowerCAmelCase = False
break
if execution:
_lowerCAmelCase = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
_lowerCAmelCase = original_need_index
print(F'''Process {process_number + 1} is executing.''' )
# remove the process run from stack
need_list.remove(_lowerCAmelCase )
# update available/freed resources stack
_lowerCAmelCase = np.array(_lowerCAmelCase ) + np.array(
alloc_resources_table[process_number] )
print(
'''Updated available resource stack for processes: '''
+ ''' '''.join([str(x ) for x in available_resources] ) )
break
if safe:
print('''The process is in a safe state.\n''' )
else:
print('''System in unsafe state. Aborting...\n''' )
break
def __lowerCAmelCase ( self ):
print(''' ''' * 9 + '''Allocated Resource Table''' )
for item in self.__allocated_resources_table:
print(
F'''P{self.__allocated_resources_table.index(_lowerCAmelCase ) + 1}'''
+ ''' '''.join(F'''{it:>8}''' for it in item )
+ '''\n''' )
print(''' ''' * 9 + '''System Resource Table''' )
for item in self.__maximum_claim_table:
print(
F'''P{self.__maximum_claim_table.index(_lowerCAmelCase ) + 1}'''
+ ''' '''.join(F'''{it:>8}''' for it in item )
+ '''\n''' )
print(
'''Current Usage by Active Processes: '''
+ ''' '''.join(str(x ) for x in self.__claim_vector ) )
print(
'''Initial Available Resources: '''
+ ''' '''.join(str(x ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
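# Usage sketch (names are placeholders for the renamed class and its kwargs
# entry point): with the module-level test vectors defined above,
#   BankersAlgorithm(claim_vector, allocated_resources_table,
#                    maximum_claim_table).main(describe=True)
# prints both tables, then replays the processes in a safe order if one exists,
# or reports "System in unsafe state" otherwise.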
| 701 |
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[Any] )->Dict:
# Initialise PyTorch model
_lowerCAmelCase = AlbertConfig.from_json_file(_SCREAMING_SNAKE_CASE )
print(f'''Building PyTorch model from configuration: {config}''' )
_lowerCAmelCase = AlbertForPreTraining(_SCREAMING_SNAKE_CASE )
# Load weights from tf checkpoint
load_tf_weights_in_albert(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--albert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained ALBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
UpperCAmelCase_ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
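# Invocation sketch (all paths are placeholders):
# python convert_albert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./albert_base/model.ckpt-best \
#     --albert_config_file ./albert_base/albert_config.json \
#     --pytorch_dump_path ./albert_base/pytorch_model.bin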
| 664 | 0 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
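# Core pattern this script demonstrates, in isolation (sketch — assumes an
# Accelerator constructed with gradient_accumulation_steps > 1): wrapping the
# forward/backward in `accelerator.accumulate(model)` makes Accelerate scale the
# loss and skip the optimizer step until enough micro-batches have been seen,
# so the loop body reads like plain training:
#
#   with accelerator.accumulate(model):
#       loss = model(**batch).loss
#       accelerator.backward(loss)
#       optimizer.step()
#       lr_scheduler.step()
#       optimizer.zero_grad()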
UpperCAmelCase_ = 1_6
UpperCAmelCase_ = 3_2
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str = 1_6 )->Tuple:
_lowerCAmelCase = AutoTokenizer.from_pretrained('''bert-base-cased''' )
_lowerCAmelCase = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(_SCREAMING_SNAKE_CASE : Any ):
# max_length=None => use the model max length (it's actually the default)
_lowerCAmelCase = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowercase__ , max_length=lowercase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_lowerCAmelCase = datasets.map(
lowercase__ , batched=lowercase__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_lowerCAmelCase = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(_SCREAMING_SNAKE_CASE : Tuple ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_lowerCAmelCase = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_lowerCAmelCase = 1_6
elif accelerator.mixed_precision != "no":
_lowerCAmelCase = 8
else:
_lowerCAmelCase = None
return tokenizer.pad(
lowercase__ , padding='''longest''' , max_length=lowercase__ , pad_to_multiple_of=lowercase__ , return_tensors='''pt''' , )
# Instantiate dataloaders.
_lowerCAmelCase = DataLoader(
tokenized_datasets['''train'''] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
_lowerCAmelCase = DataLoader(
tokenized_datasets['''validation'''] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
UpperCAmelCase_ = mocked_dataloaders # noqa: F811
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Optional[Any] )->Dict:
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , lowercase__ ) == "1":
_lowerCAmelCase = 2
# New Code #
_lowerCAmelCase = int(args.gradient_accumulation_steps )
# Initialize accelerator
_lowerCAmelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=lowercase__ )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
'''Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`''' )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_lowerCAmelCase = config['''lr''']
_lowerCAmelCase = int(config['''num_epochs'''] )
_lowerCAmelCase = int(config['''seed'''] )
_lowerCAmelCase = int(config['''batch_size'''] )
_lowerCAmelCase = evaluate.load('''glue''' , '''mrpc''' )
set_seed(lowercase__ )
_lowerCAmelCase , _lowerCAmelCase = get_dataloaders(lowercase__ , lowercase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=lowercase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_lowerCAmelCase = model.to(accelerator.device )
# Instantiate optimizer
_lowerCAmelCase = AdamW(params=model.parameters() , lr=lowercase__ )
# Instantiate scheduler
_lowerCAmelCase = get_linear_schedule_with_warmup(
optimizer=lowercase__ , num_warmup_steps=1_0_0 , num_training_steps=(len(lowercase__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = accelerator.prepare(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# Now we train the model
for epoch in range(lowercase__ ):
model.train()
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(lowercase__ ):
_lowerCAmelCase = model(**lowercase__ )
_lowerCAmelCase = output.loss
accelerator.backward(lowercase__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_lowerCAmelCase = model(**lowercase__ )
_lowerCAmelCase = outputs.logits.argmax(dim=-1 )
_lowerCAmelCase , _lowerCAmelCase = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=lowercase__ , references=lowercase__ , )
_lowerCAmelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , lowercase__ )
def UpperCAmelCase__ ( )->str:
_lowerCAmelCase = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=lowercase__ , default=lowercase__ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
# New Code #
parser.add_argument(
'''--gradient_accumulation_steps''' , type=lowercase__ , default=1 , help='''The number of minibatches to be ran before gradients are accumulated.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
_lowerCAmelCase = parser.parse_args()
_lowerCAmelCase = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 4_2, '''batch_size''': 1_6}
training_function(lowercase__ , lowercase__ )
if __name__ == "__main__":
main()
| 702 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
raise Exception("requires fairseq >= 1.0.0a")
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = "Hello world! cécé herlolip"
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : bool )->List[Any]:
_lowerCAmelCase = FairseqRobertaModel.from_pretrained(_SCREAMING_SNAKE_CASE )
roberta.eval() # disable dropout
_lowerCAmelCase = roberta.model.encoder.sentence_encoder
_lowerCAmelCase = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1e-5 , )
if classification_head:
_lowerCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our RoBERTa config:''' , _SCREAMING_SNAKE_CASE )
_lowerCAmelCase = XLMRobertaXLForSequenceClassification(_SCREAMING_SNAKE_CASE ) if classification_head else XLMRobertaXLForMaskedLM(_SCREAMING_SNAKE_CASE )
model.eval()
# Now let's copy all the weights.
# Embeddings
_lowerCAmelCase = roberta_sent_encoder.embed_tokens.weight
_lowerCAmelCase = roberta_sent_encoder.embed_positions.weight
_lowerCAmelCase = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
_lowerCAmelCase = roberta_sent_encoder.layer_norm.weight
_lowerCAmelCase = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
_lowerCAmelCase = model.roberta.encoder.layer[i]
_lowerCAmelCase = roberta_sent_encoder.layers[i]
_lowerCAmelCase = layer.attention
_lowerCAmelCase = roberta_layer.self_attn_layer_norm.weight
_lowerCAmelCase = roberta_layer.self_attn_layer_norm.bias
# self attention
_lowerCAmelCase = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
_lowerCAmelCase = roberta_layer.self_attn.q_proj.weight
_lowerCAmelCase = roberta_layer.self_attn.q_proj.bias
_lowerCAmelCase = roberta_layer.self_attn.k_proj.weight
_lowerCAmelCase = roberta_layer.self_attn.k_proj.bias
_lowerCAmelCase = roberta_layer.self_attn.v_proj.weight
_lowerCAmelCase = roberta_layer.self_attn.v_proj.bias
# self-attention output
_lowerCAmelCase = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
_lowerCAmelCase = roberta_layer.self_attn.out_proj.weight
_lowerCAmelCase = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
_lowerCAmelCase = roberta_layer.final_layer_norm.weight
_lowerCAmelCase = roberta_layer.final_layer_norm.bias
# intermediate
_lowerCAmelCase = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
_lowerCAmelCase = roberta_layer.fca.weight
_lowerCAmelCase = roberta_layer.fca.bias
# output
_lowerCAmelCase = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
_lowerCAmelCase = roberta_layer.fca.weight
_lowerCAmelCase = roberta_layer.fca.bias
# end of layer
if classification_head:
_lowerCAmelCase = roberta.model.classification_heads['''mnli'''].dense.weight
_lowerCAmelCase = roberta.model.classification_heads['''mnli'''].dense.bias
_lowerCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.weight
_lowerCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
_lowerCAmelCase = roberta.model.encoder.lm_head.dense.weight
_lowerCAmelCase = roberta.model.encoder.lm_head.dense.bias
_lowerCAmelCase = roberta.model.encoder.lm_head.layer_norm.weight
_lowerCAmelCase = roberta.model.encoder.lm_head.layer_norm.bias
_lowerCAmelCase = roberta.model.encoder.lm_head.weight
_lowerCAmelCase = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
_lowerCAmelCase = roberta.encode(_SCREAMING_SNAKE_CASE ).unsqueeze(0 ) # batch of size 1
_lowerCAmelCase = model(_SCREAMING_SNAKE_CASE )[0]
if classification_head:
_lowerCAmelCase = roberta.model.classification_heads['''mnli'''](roberta.extract_features(_SCREAMING_SNAKE_CASE ) )
else:
_lowerCAmelCase = roberta.model(_SCREAMING_SNAKE_CASE )[0]
print(our_output.shape , their_output.shape )
_lowerCAmelCase = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
_lowerCAmelCase = torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 )
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
if not success:
raise Exception('''Something went wRoNg''' )
pathlib.Path(_SCREAMING_SNAKE_CASE ).mkdir(parents=_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
UpperCAmelCase_ = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 664 | 0 |
import math
import flax.linen as nn
import jax.numpy as jnp
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] = 1 , _SCREAMING_SNAKE_CASE : List[str] = 1 , _SCREAMING_SNAKE_CASE : Optional[Any] = 1.0e4 , _SCREAMING_SNAKE_CASE : Optional[Any] = False , _SCREAMING_SNAKE_CASE : Dict = 1.0 , )->Optional[Any]:
assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
assert embedding_dim % 2 == 0, f'''Embedding dimension {embedding_dim} should be even'''
_lowerCAmelCase = float(embedding_dim // 2 )
_lowerCAmelCase = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
_lowerCAmelCase = min_timescale * jnp.exp(jnp.arange(lowerCAmelCase_ , dtype=jnp.floataa ) * -log_timescale_increment )
_lowerCAmelCase = jnp.expand_dims(lowerCAmelCase_ , 1 ) * jnp.expand_dims(lowerCAmelCase_ , 0 )
# scale embeddings
_lowerCAmelCase = scale * emb
if flip_sin_to_cos:
_lowerCAmelCase = jnp.concatenate([jnp.cos(lowerCAmelCase_ ), jnp.sin(lowerCAmelCase_ )] , axis=1 )
else:
_lowerCAmelCase = jnp.concatenate([jnp.sin(lowerCAmelCase_ ), jnp.cos(lowerCAmelCase_ )] , axis=1 )
_lowerCAmelCase = jnp.reshape(lowerCAmelCase_ , [jnp.shape(lowerCAmelCase_ )[0], embedding_dim] )
return signal
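# Shape check (sketch; the helper above is called by a hypothetical name): with
# 4 timesteps and embedding_dim=32, half the features are sines and half are
# cosines, concatenated along the last axis:
#   emb = get_sinusoidal_embeddings(jnp.arange(4), 32)
#   assert emb.shape == (4, 32)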
class UpperCAmelCase ( nn.Module ):
SCREAMING_SNAKE_CASE__ = 3_2
SCREAMING_SNAKE_CASE__ = jnp.floataa
@nn.compact
def __call__( self , _lowerCAmelCase ):
_lowerCAmelCase = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_1''' )(lowercase__ )
_lowerCAmelCase = nn.silu(lowercase__ )
_lowerCAmelCase = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_2''' )(lowercase__ )
return temb
class UpperCAmelCase ( nn.Module ):
SCREAMING_SNAKE_CASE__ = 3_2
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = 1
@nn.compact
def __call__( self , _lowerCAmelCase ):
return get_sinusoidal_embeddings(
lowercase__ , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
| 703 |
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class UpperCAmelCase ( snake_case_ ):
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = None
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : int=0.999 , _SCREAMING_SNAKE_CASE : List[str]="cosine" , )->Optional[int]:
if alpha_transform_type == "cosine":
def alpha_bar_fn(t ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(t ):
return math.exp(t * -12.0 )
else:
raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
_lowerCAmelCase = []
for i in range(_SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = i / num_diffusion_timesteps
_lowerCAmelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_SCREAMING_SNAKE_CASE ) / alpha_bar_fn(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) )
return torch.tensor(_SCREAMING_SNAKE_CASE , dtype=torch.floataa )
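# Numeric sketch of the cosine schedule above: each beta is
#   min(1 - alpha_bar((i + 1) / T) / alpha_bar(i / T), max_beta)
# with alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2, so the betas
# increase monotonically with i and are clipped at max_beta (0.999 by default).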
class UpperCAmelCase ( snake_case_ ,snake_case_ ):
SCREAMING_SNAKE_CASE__ = 1
@register_to_config
def __init__( self , _lowerCAmelCase = 1_000 , _lowerCAmelCase = 0.0_001 , _lowerCAmelCase = 0.02 , _lowerCAmelCase = "linear" , _lowerCAmelCase = None , _lowerCAmelCase = True , _lowerCAmelCase = True , _lowerCAmelCase = 0 , _lowerCAmelCase = "epsilon" , _lowerCAmelCase = 1.0 , **_lowerCAmelCase , ):
if kwargs.get('''set_alpha_to_one''' , _lowerCAmelCase ) is not None:
_lowerCAmelCase = (
'''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'''
)
deprecate('''set_alpha_to_one''' , '''1.0.0''' , _lowerCAmelCase , standard_warn=_lowerCAmelCase )
_lowerCAmelCase = kwargs['''set_alpha_to_one''']
if trained_betas is not None:
_lowerCAmelCase = torch.tensor(_lowerCAmelCase , dtype=torch.floataa )
elif beta_schedule == "linear":
_lowerCAmelCase = torch.linspace(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_lowerCAmelCase = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , _lowerCAmelCase , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_lowerCAmelCase = betas_for_alpha_bar(_lowerCAmelCase )
else:
raise NotImplementedError(F'''{beta_schedule} does is not implemented for {self.__class__}''' )
_lowerCAmelCase = 1.0 - self.betas
_lowerCAmelCase = torch.cumprod(self.alphas , dim=0 )
# At every step in inverted ddim, we are looking into the next alphas_cumprod
# For the final step, there is no next alphas_cumprod, and the index is out of bounds
# `set_alpha_to_zero` decides whether we set this parameter simply to zero
# in this case, self.step() just output the predicted noise
# or whether we use the final alpha of the "non-previous" one.
_lowerCAmelCase = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
_lowerCAmelCase = 1.0
# setable values
_lowerCAmelCase = None
_lowerCAmelCase = torch.from_numpy(np.arange(0 , _lowerCAmelCase ).copy().astype(np.intaa ) )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
return sample
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
F'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'''
F''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'''
F''' maximal {self.config.num_train_timesteps} timesteps.''' )
_lowerCAmelCase = num_inference_steps
_lowerCAmelCase = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_lowerCAmelCase = (np.arange(0 , _lowerCAmelCase ) * step_ratio).round().copy().astype(np.intaa )
_lowerCAmelCase = torch.from_numpy(_lowerCAmelCase ).to(_lowerCAmelCase )
self.timesteps += self.config.steps_offset
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 0.0 , _lowerCAmelCase = False , _lowerCAmelCase = None , _lowerCAmelCase = True , ):
# 1. get previous step value (=t+1)
_lowerCAmelCase = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
_lowerCAmelCase = self.alphas_cumprod[timestep]
_lowerCAmelCase = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
_lowerCAmelCase = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
_lowerCAmelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
_lowerCAmelCase = model_output
elif self.config.prediction_type == "sample":
_lowerCAmelCase = model_output
_lowerCAmelCase = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
_lowerCAmelCase = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
_lowerCAmelCase = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'''
''' `v_prediction`''' )
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
_lowerCAmelCase = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_lowerCAmelCase = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_lowerCAmelCase = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=_lowerCAmelCase , pred_original_sample=_lowerCAmelCase )
def __len__( self ):
return self.config.num_train_timesteps
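# Usage sketch (hypothetical names; this is the inverted-DDIM variant, so step()
# walks from a clean sample toward noise, with prev_timestep = t + T // num_steps):
#   scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   for t in scheduler.timesteps:
#       sample = scheduler.step(model_output, t, sample).prev_sample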
| 664 | 0 |
import random
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : bool = False )->List[str]:
_lowerCAmelCase = {i: [] for i in range(SCREAMING_SNAKE_CASE__ )}
# if probability is greater than or equal to 1, generate a complete graph
if probability >= 1:
return complete_graph(SCREAMING_SNAKE_CASE__ )
# if probability is lower than or equal to 0, return a graph without edges
if probability <= 0:
return graph
# for each pair of nodes, add an edge from i to j
# if the randomly generated number is lower than the given probability
for i in range(SCREAMING_SNAKE_CASE__ ):
for j in range(i + 1 , SCREAMING_SNAKE_CASE__ ):
if random.random() < probability:
graph[i].append(SCREAMING_SNAKE_CASE__ )
if not directed:
# if the graph is undirected, also add the reverse edge from j to i
graph[j].append(SCREAMING_SNAKE_CASE__ )
return graph
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : int )->List[Any]:
return {
i: [j for j in range(SCREAMING_SNAKE_CASE__ ) if i != j] for i in range(SCREAMING_SNAKE_CASE__ )
}
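# Example (sketch; names are placeholders for the two helpers above):
#   complete_graph(3)    -> {0: [1, 2], 1: [0, 2], 2: [0, 1]}
#   random_graph(3, 0.0) -> {0: [], 1: [], 2: []}
#   random_graph(3, 1.0) -> complete_graph(3)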
if __name__ == "__main__":
import doctest
doctest.testmod()
| 704 |
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCAmelCase_ = {
"configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
"tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
"CpmAntForCausalLM",
"CpmAntModel",
"CpmAntPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
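# Effect of the _LazyModule indirection above (sketch): importing the package
# stays cheap, and torch-backed symbols are only materialized on first access:
#   from transformers.models.cpmant import CpmAntConfig   # no torch import yet
#   from transformers.models.cpmant import CpmAntModel    # loads the modeling code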
| 664 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 705 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase ( snake_case_ ):
SCREAMING_SNAKE_CASE__ = '''ClapFeatureExtractor'''
SCREAMING_SNAKE_CASE__ = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
def __init__( self , _lowerCAmelCase , _lowerCAmelCase ):
super().__init__(_lowerCAmelCase , _lowerCAmelCase )
def __call__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , **_lowerCAmelCase ):
_lowerCAmelCase = kwargs.pop('''sampling_rate''' , _lowerCAmelCase )
if text is None and audios is None:
raise ValueError('''You have to specify either text or audios. Both cannot be none.''' )
if text is not None:
_lowerCAmelCase = self.tokenizer(_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase )
if audios is not None:
_lowerCAmelCase = self.feature_extractor(
_lowerCAmelCase , sampling_rate=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase )
if text is not None and audios is not None:
_lowerCAmelCase = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_lowerCAmelCase ) , tensor_type=_lowerCAmelCase )
def __lowerCAmelCase ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase )
def __lowerCAmelCase ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase )
@property
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.tokenizer.model_input_names
_lowerCAmelCase = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
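# Usage sketch (the checkpoint name is illustrative):
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   inputs = processor(text=["a dog barking"], audios=waveform,
#                      sampling_rate=48_000, return_tensors="pt")
# Text-only or audio-only calls are also valid; passing neither raises the
# ValueError above.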
| 664 | 0 |
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
SquadV1Processor,
SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 706 |
from __future__ import annotations
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : list )->list:
if len(_SCREAMING_SNAKE_CASE ) == 0:
return []
_lowerCAmelCase , _lowerCAmelCase = min(_SCREAMING_SNAKE_CASE ), max(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = int(max_value - min_value ) + 1
_lowerCAmelCase = [[] for _ in range(_SCREAMING_SNAKE_CASE )]
for i in my_list:
buckets[int(i - min_value )].append(_SCREAMING_SNAKE_CASE )
return [v for bucket in buckets for v in sorted(_SCREAMING_SNAKE_CASE )]
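# Trace (sketch) for the first assertion below: bucket_sort([4, 5, 3, 2, 1])
# finds min=1 and max=5, builds 5 buckets indexed by int(value - 1), drops each
# value into its own bucket, then sorts and flattens to [1, 2, 3, 4, 5].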
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
| 664 | 0 |
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : int )->str:
_lowerCAmelCase = torch.load(lowercase_ , map_location='''cpu''' )
if "model" in sd.keys():
_lowerCAmelCase = torch.load(lowercase_ , map_location='''cpu''' )["model"]
# pop unnecessary weights
_lowerCAmelCase = [
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
sd.pop(lowercase_ )
_lowerCAmelCase = {
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
_lowerCAmelCase = sd.pop(lowercase_ )
_lowerCAmelCase = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
_lowerCAmelCase = sd[key]
# We split QKV in separate Q,K,V
_lowerCAmelCase = key.replace('''.qkv_proj.''' , '''.q_proj.''' )
_lowerCAmelCase = key.replace('''.qkv_proj.''' , '''.k_proj.''' )
_lowerCAmelCase = key.replace('''.qkv_proj.''' , '''.v_proj.''' )
_lowerCAmelCase = value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
_lowerCAmelCase = torch.split(lowercase_ , depth // 3 , dim=0 )
_lowerCAmelCase = q
_lowerCAmelCase = k
_lowerCAmelCase = v
del sd[key]
return sd
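# Shape sketch for the QKV split above: a fused .qkv_proj weight of shape
# (3 * hidden_size, hidden_size) is torch.split along dim 0 into three
# (hidden_size, hidden_size) chunks that populate the separate
# .q_proj/.k_proj/.v_proj keys expected by the HF OPT implementation.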
@torch.no_grad()
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int=None )->int:
_lowerCAmelCase = load_checkpoint(lowercase_ )
if config is not None:
_lowerCAmelCase = OPTConfig.from_pretrained(lowercase_ )
else:
_lowerCAmelCase = OPTConfig()
_lowerCAmelCase = OPTModel(lowercase_ ).half().eval()
model.load_state_dict(lowercase_ )
# Check results
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
model.save_pretrained(lowercase_ )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fairseq_path",
type=str,
help=(
"path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
" https://huggingface.co/models?other=opt_metasq"
),
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
UpperCAmelCase_ = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 707 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
UpperCAmelCase_ = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def UpperCAmelCase__ ( )->Any:
_lowerCAmelCase = _ask_options(
'''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
_lowerCAmelCase = get_sagemaker_input()
else:
_lowerCAmelCase = get_cluster_input()
return config
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : int=None )->str:
if subparsers is not None:
_lowerCAmelCase = subparsers.add_parser('''config''' , description=_SCREAMING_SNAKE_CASE )
else:
_lowerCAmelCase = argparse.ArgumentParser('''Accelerate config command''' , description=_SCREAMING_SNAKE_CASE )
parser.add_argument(
'''--config_file''' , default=_SCREAMING_SNAKE_CASE , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , )
if subparsers is not None:
parser.set_defaults(func=_SCREAMING_SNAKE_CASE )
return parser
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict )->str:
_lowerCAmelCase = get_user_input()
if args.config_file is not None:
_lowerCAmelCase = args.config_file
else:
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
os.makedirs(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = default_yaml_config_file
if config_file.endswith('''.json''' ):
config.to_json_file(_SCREAMING_SNAKE_CASE )
else:
config.to_yaml_file(_SCREAMING_SNAKE_CASE )
print(f'''accelerate configuration saved at {config_file}''' )
def UpperCAmelCase__ ( )->List[Any]:
_lowerCAmelCase = config_command_parser()
_lowerCAmelCase = parser.parse_args()
config_command(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 664 | 0 |
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[int] )->List[str]:
_lowerCAmelCase = [0 for i in range(r + 1 )]
# nc0 = 1
_lowerCAmelCase = 1
for i in range(1 , n + 1 ):
# to compute current row from previous row.
_lowerCAmelCase = min(__UpperCamelCase , __UpperCamelCase )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
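# The right-to-left in-place update above is Pascal's rule,
#   C(i, j) = C(i - 1, j) + C(i - 1, j - 1),
# applied to a single rolling row; e.g. C(10, 5) = 252, which is exactly what
# the print below reports.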
print(binomial_coefficient(n=1_0, r=5))
| 708 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
UpperCAmelCase_ = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
UpperCAmelCase_ = 1_0
UpperCAmelCase_ = 2_5_6
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] )->Optional[MinHash]:
if len(_SCREAMING_SNAKE_CASE ) < MIN_NUM_TOKENS:
return None
_lowerCAmelCase = MinHash(num_perm=_SCREAMING_SNAKE_CASE )
for token in set(_SCREAMING_SNAKE_CASE ):
min_hash.update(token.encode() )
return min_hash
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str )->Set[str]:
return {t for t in NON_ALPHA.split(_SCREAMING_SNAKE_CASE ) if len(t.strip() ) > 0}
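# Example (sketch): splitting "def foo(x): return x + 1" on the NON_ALPHA
# pattern above yields the token set {"def", "foo", "x", "return", "1"}.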
class UpperCAmelCase :
def __init__( self , *,
_lowerCAmelCase = 0.85 , ):
_lowerCAmelCase = duplication_jaccard_threshold
_lowerCAmelCase = NUM_PERM
_lowerCAmelCase = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
_lowerCAmelCase = defaultdict(_lowerCAmelCase )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = self._index.query(_lowerCAmelCase )
if code_key in self._index.keys:
print(F'''Duplicate key {code_key}''' )
return
self._index.insert(_lowerCAmelCase , _lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(_lowerCAmelCase )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = []
for base, duplicates in self._duplicate_clusters.items():
_lowerCAmelCase = [base] + list(_lowerCAmelCase )
# reformat the cluster to be a list of dict
_lowerCAmelCase = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster]
duplicate_clusters.append(_lowerCAmelCase )
return duplicate_clusters
def __lowerCAmelCase ( self , _lowerCAmelCase ):
_lowerCAmelCase = self.get_duplicate_clusters()
with open(_lowerCAmelCase , '''w''' ) as f:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str )->Optional[Any]:
_lowerCAmelCase , _lowerCAmelCase = element
_lowerCAmelCase = get_min_hash([t for t in NON_ALPHA.split(data['''content'''] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Type[Dataset] )->Any:
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(_SCREAMING_SNAKE_CASE , max_queue_size=1_0_0_0_0 ) , chunksize=1_0_0 , ):
if data is not None:
yield data
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Type[Dataset] , _SCREAMING_SNAKE_CASE : float )->str:
_lowerCAmelCase = DuplicationIndex(duplication_jaccard_threshold=_SCREAMING_SNAKE_CASE )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(_SCREAMING_SNAKE_CASE ) ) , max_queue_size=1_0_0 ) ):
di.add(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str )->float:
_lowerCAmelCase = get_tokens(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = get_tokens(_SCREAMING_SNAKE_CASE )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
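# Intended computation above: the Jaccard similarity of two token sets,
# |A & B| / |A | B|; e.g. {"a", "b", "c"} vs {"b", "c", "d"} gives 2 / 4 = 0.5.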
UpperCAmelCase_ = None
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Any )->List[Any]:
_lowerCAmelCase = []
for elementa in cluster:
_lowerCAmelCase = _shared_dataset[elementa['''base_index''']]['''content''']
for elementa in extremes:
_lowerCAmelCase = _shared_dataset[elementa['''base_index''']]['''content''']
if jaccard_similarity(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
_lowerCAmelCase = 1
extremes.append(_SCREAMING_SNAKE_CASE )
return extremes
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : str )->Tuple:
global _shared_dataset
_lowerCAmelCase = dataset
_lowerCAmelCase = []
_lowerCAmelCase = partial(_find_cluster_extremes_shared , jaccard_threshold=_SCREAMING_SNAKE_CASE )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) , total=len(_SCREAMING_SNAKE_CASE ) , ):
extremes_list.append(_SCREAMING_SNAKE_CASE )
return extremes_list
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Type[Dataset] , _SCREAMING_SNAKE_CASE : float = 0.85 )->Tuple[Type[Dataset], List[List[Dict]]]:
_lowerCAmelCase = make_duplicate_clusters(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_lowerCAmelCase = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster}
_lowerCAmelCase = {}
_lowerCAmelCase = find_extremes(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for extremes in extremes_clusters:
for element in extremes:
_lowerCAmelCase = element
_lowerCAmelCase = duplicate_indices - set(extreme_dict.keys() )
_lowerCAmelCase = dataset.filter(lambda _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : idx not in remove_indices , with_indices=_SCREAMING_SNAKE_CASE )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
_lowerCAmelCase = element['''base_index'''] in extreme_dict
if element["is_extreme"]:
_lowerCAmelCase = extreme_dict[element['''base_index''']]['''copies''']
print(f'''Original dataset size: {len(_SCREAMING_SNAKE_CASE )}''' )
print(f'''Number of duplicate clusters: {len(_SCREAMING_SNAKE_CASE )}''' )
print(f'''Files in duplicate cluster: {len(_SCREAMING_SNAKE_CASE )}''' )
print(f'''Unique files in duplicate cluster: {len(_SCREAMING_SNAKE_CASE )}''' )
print(f'''Filtered dataset size: {len(_SCREAMING_SNAKE_CASE )}''' )
return ds_filter, duplicate_clusters
| 664 | 0 |
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class UpperCAmelCase ( _lowerCAmelCase ,_lowerCAmelCase ,unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE__ = (
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ):
_lowerCAmelCase = super()._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
if return_labels:
if model_class in get_values(_lowerCAmelCase ):
_lowerCAmelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class UpperCAmelCase ( _lowerCAmelCase ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=13 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=99 , _lowerCAmelCase=32 , _lowerCAmelCase=32 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=37 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=512 , _lowerCAmelCase=16 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , ):
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = seq_length
_lowerCAmelCase = is_training
_lowerCAmelCase = use_input_mask
_lowerCAmelCase = use_token_type_ids
_lowerCAmelCase = use_labels
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = type_vocab_size
_lowerCAmelCase = type_sequence_label_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = num_labels
_lowerCAmelCase = num_choices
_lowerCAmelCase = scope
_lowerCAmelCase = embedding_size
def __lowerCAmelCase ( self ):
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase = None
if self.use_input_mask:
_lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase = None
if self.use_token_type_ids:
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
if self.use_labels:
_lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = TFMobileBertModel(config=_lowerCAmelCase )
_lowerCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_lowerCAmelCase = model(_lowerCAmelCase )
_lowerCAmelCase = [input_ids, input_mask]
_lowerCAmelCase = model(_lowerCAmelCase )
_lowerCAmelCase = model(_lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = TFMobileBertForMaskedLM(config=_lowerCAmelCase )
_lowerCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_lowerCAmelCase = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = TFMobileBertForNextSentencePrediction(config=_lowerCAmelCase )
_lowerCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_lowerCAmelCase = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = TFMobileBertForPreTraining(config=_lowerCAmelCase )
_lowerCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_lowerCAmelCase = model(_lowerCAmelCase )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = TFMobileBertForSequenceClassification(config=_lowerCAmelCase )
_lowerCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_lowerCAmelCase = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = self.num_choices
_lowerCAmelCase = TFMobileBertForMultipleChoice(config=_lowerCAmelCase )
_lowerCAmelCase = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_lowerCAmelCase = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_lowerCAmelCase = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_lowerCAmelCase = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
_lowerCAmelCase = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = TFMobileBertForTokenClassification(config=_lowerCAmelCase )
_lowerCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_lowerCAmelCase = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = TFMobileBertForQuestionAnswering(config=_lowerCAmelCase )
_lowerCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_lowerCAmelCase = model(_lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.prepare_config_and_inputs()
        ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) = config_and_inputs
_lowerCAmelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
def __lowerCAmelCase ( self ):
_lowerCAmelCase = TFMobileBertModelTest.TFMobileBertModelTester(self )
_lowerCAmelCase = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=37 )
def __lowerCAmelCase ( self ):
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*_lowerCAmelCase )
@slow
def __lowerCAmelCase ( self ):
for model_name in ["google/mobilebert-uncased"]:
_lowerCAmelCase = TFMobileBertModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@require_tf
class UpperCAmelCase ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self ):
_lowerCAmelCase = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''' )
_lowerCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
_lowerCAmelCase = model(_lowerCAmelCase )[0]
_lowerCAmelCase = [1, 6, 30_522]
self.assertEqual(output.shape , _lowerCAmelCase )
_lowerCAmelCase = tf.constant(
[
[
[-4.5_919_547, -9.248_295, -9.645_256],
[-6.7_306_175, -6.440_284, -6.6_052_837],
[-7.2_743_506, -6.7_847_915, -6.024_673],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _lowerCAmelCase , atol=1E-4 )
| 709 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class UpperCAmelCase ( snake_case_ ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = dataset
_lowerCAmelCase = process
_lowerCAmelCase = params
def __len__( self ):
return len(self.dataset )
def __getitem__( self , _lowerCAmelCase ):
_lowerCAmelCase = self.dataset[i]
_lowerCAmelCase = self.process(_lowerCAmelCase , **self.params )
return processed
class UpperCAmelCase ( snake_case_ ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None ):
_lowerCAmelCase = loader
_lowerCAmelCase = infer
_lowerCAmelCase = params
if loader_batch_size == 1:
            # Let's spare some time by deactivating batch unrolling altogether
_lowerCAmelCase = None
_lowerCAmelCase = loader_batch_size
# Internal bookkeeping
_lowerCAmelCase = None
_lowerCAmelCase = None
def __len__( self ):
return len(self.loader )
def __iter__( self ):
_lowerCAmelCase = iter(self.loader )
return self
def __lowerCAmelCase ( self ):
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
_lowerCAmelCase = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
_lowerCAmelCase = {}
for k, element in self._loader_batch_data.items():
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
# Convert ModelOutput to tuple first
_lowerCAmelCase = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
_lowerCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
_lowerCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_lowerCAmelCase , _lowerCAmelCase ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
_lowerCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
_lowerCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
_lowerCAmelCase = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                    # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
_lowerCAmelCase = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                    # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
_lowerCAmelCase = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
_lowerCAmelCase = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
_lowerCAmelCase = self._loader_batch_data.__class__(_lowerCAmelCase )
self._loader_batch_index += 1
return result
def __lowerCAmelCase ( self ):
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
_lowerCAmelCase = next(self.iterator )
_lowerCAmelCase = self.infer(_lowerCAmelCase , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(_lowerCAmelCase , torch.Tensor ):
_lowerCAmelCase = processed
else:
_lowerCAmelCase = list(processed.keys() )[0]
_lowerCAmelCase = processed[key]
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = len(_lowerCAmelCase )
else:
_lowerCAmelCase = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_lowerCAmelCase = observed_batch_size
# Setting internal index to unwrap the batch
_lowerCAmelCase = processed
_lowerCAmelCase = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
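# --- Illustrative sketch (not part of the original module; the helper name is
# made up for this example). The unbatching done by the iterator above boils
# down to slicing every tensor in a batched output at one index and re-adding
# a batch axis of size 1, assuming a plain dict of torch tensors:
def _unbatch_example(batch: dict, index: int) -> dict:
    # e.g. {"logits": tensor of shape (B, T, V)} -> {"logits": tensor of shape (1, T, V)}
    return {k: v[index].unsqueeze(0) for k, v in batch.items()}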
class UpperCAmelCase ( snake_case_ ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None ):
super().__init__(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def __iter__( self ):
_lowerCAmelCase = iter(self.loader )
_lowerCAmelCase = None
return self
def __lowerCAmelCase ( self ):
if self.subiterator is None:
_lowerCAmelCase = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
_lowerCAmelCase = next(self.subiterator )
except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item.
            # ChunkIterator will keep feeding until ALL elements of the iterator
            # have created their subiterator and have been iterated over.
            #
            # Another way to look at it is that we're basically flattening lists
            # of lists into a single list, but with generators.
_lowerCAmelCase = self.infer(next(self.iterator ) , **self.params )
_lowerCAmelCase = next(self.subiterator )
return processed
class UpperCAmelCase ( snake_case_ ):
def __iter__( self ):
_lowerCAmelCase = iter(self.loader )
return self
def __lowerCAmelCase ( self ):
# Extremely similar to PipelineIterator in its unpacking mechanism
# BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator`, so we
        # need to keep track of how to regroup here, at the original `process`
        # boundaries, so that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # hits an `is_last` and then just passes it on to the caller.
_lowerCAmelCase = False
_lowerCAmelCase = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
_lowerCAmelCase = self.loader_batch_item()
_lowerCAmelCase = item.pop('''is_last''' )
accumulator.append(_lowerCAmelCase )
if is_last:
return accumulator
while not is_last:
_lowerCAmelCase = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(_lowerCAmelCase , torch.Tensor ):
_lowerCAmelCase = processed
else:
_lowerCAmelCase = list(processed.keys() )[0]
_lowerCAmelCase = processed[key]
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = len(_lowerCAmelCase )
else:
_lowerCAmelCase = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_lowerCAmelCase = observed_batch_size
_lowerCAmelCase = processed
_lowerCAmelCase = 0
while self._loader_batch_index < self.loader_batch_size:
_lowerCAmelCase = self.loader_batch_item()
_lowerCAmelCase = item.pop('''is_last''' )
accumulator.append(_lowerCAmelCase )
if is_last:
return accumulator
else:
_lowerCAmelCase = processed
_lowerCAmelCase = item.pop('''is_last''' )
accumulator.append(_lowerCAmelCase )
return accumulator
class UpperCAmelCase ( snake_case_ ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = dataset
_lowerCAmelCase = key
def __len__( self ):
return len(self.dataset )
def __getitem__( self , _lowerCAmelCase ):
return self.dataset[i][self.key]
class UpperCAmelCase ( snake_case_ ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = dataset
_lowerCAmelCase = keya
        _lowerCAmelCase = keyb
def __len__( self ):
return len(self.dataset )
def __getitem__( self , _lowerCAmelCase ):
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 664 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCAmelCase_ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class UpperCAmelCase ( UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ = ['''pixel_values''']
def __init__( self , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = PILImageResampling.BICUBIC , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = True , _lowerCAmelCase = 1 / 255 , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = True , **_lowerCAmelCase , ):
super().__init__(**_a )
        _lowerCAmelCase = size if size is not None else {'''shortest_edge''': 224}
        _lowerCAmelCase = get_size_dict(_a , default_to_square=_a )
        _lowerCAmelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
_lowerCAmelCase = get_size_dict(_a , default_to_square=_a , param_name='''crop_size''' )
_lowerCAmelCase = do_resize
_lowerCAmelCase = size
_lowerCAmelCase = resample
_lowerCAmelCase = do_center_crop
_lowerCAmelCase = crop_size
_lowerCAmelCase = do_rescale
_lowerCAmelCase = rescale_factor
_lowerCAmelCase = do_normalize
_lowerCAmelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
_lowerCAmelCase = image_std if image_std is not None else OPENAI_CLIP_STD
_lowerCAmelCase = do_convert_rgb
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = PILImageResampling.BICUBIC , _lowerCAmelCase = None , **_lowerCAmelCase , ):
_lowerCAmelCase = get_size_dict(_a , default_to_square=_a )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
_lowerCAmelCase = get_resize_output_image_size(_a , size=size['''shortest_edge'''] , default_to_square=_a )
return resize(_a , size=_a , resample=_a , data_format=_a , **_a )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ):
_lowerCAmelCase = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(_a , size=(size['''height'''], size['''width''']) , data_format=_a , **_a )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ):
return rescale(_a , scale=_a , data_format=_a , **_a )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ):
return normalize(_a , mean=_a , std=_a , data_format=_a , **_a )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = ChannelDimension.FIRST , **_lowerCAmelCase , ):
_lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
_lowerCAmelCase = size if size is not None else self.size
_lowerCAmelCase = get_size_dict(_a , param_name='''size''' , default_to_square=_a )
_lowerCAmelCase = resample if resample is not None else self.resample
_lowerCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowerCAmelCase = crop_size if crop_size is not None else self.crop_size
_lowerCAmelCase = get_size_dict(_a , param_name='''crop_size''' , default_to_square=_a )
_lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_lowerCAmelCase = image_mean if image_mean is not None else self.image_mean
_lowerCAmelCase = image_std if image_std is not None else self.image_std
_lowerCAmelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_lowerCAmelCase = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_lowerCAmelCase = [convert_to_rgb(_a ) for image in images]
# All transformations expect numpy arrays.
_lowerCAmelCase = [to_numpy_array(_a ) for image in images]
if do_resize:
_lowerCAmelCase = [self.resize(image=_a , size=_a , resample=_a ) for image in images]
if do_center_crop:
_lowerCAmelCase = [self.center_crop(image=_a , size=_a ) for image in images]
if do_rescale:
_lowerCAmelCase = [self.rescale(image=_a , scale=_a ) for image in images]
if do_normalize:
_lowerCAmelCase = [self.normalize(image=_a , mean=_a , std=_a ) for image in images]
_lowerCAmelCase = [to_channel_dimension_format(_a , _a ) for image in images]
_lowerCAmelCase = {"""pixel_values""": images}
return BatchFeature(data=_a , tensor_type=_a )
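# --- Illustrative sketch (not part of the original module; the helper name is
# made up for this example). The rescale/normalize steps above reduce to
# elementwise arithmetic. A minimal numpy version, assuming a channel-last
# uint8 image:
def _rescale_and_normalize(image, mean, std, scale=1 / 255):
    image = image.astype(np.float32) * scale          # [0, 255] -> [0, 1]
    return (image - np.array(mean)) / np.array(std)   # per-channel standardization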
| 710 |
import numpy
class UpperCAmelCase :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
_lowerCAmelCase = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
_lowerCAmelCase = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
_lowerCAmelCase = numpy.random.rand(3 , 1 )
# Real output values provided.
_lowerCAmelCase = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
_lowerCAmelCase = numpy.zeros(output_array.shape )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
_lowerCAmelCase = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
_lowerCAmelCase = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def __lowerCAmelCase ( self ):
_lowerCAmelCase = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
_lowerCAmelCase = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
_lowerCAmelCase = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
for iteration in range(1 , iterations + 1 ):
_lowerCAmelCase = self.feedforward()
self.back_propagation()
if give_loss:
_lowerCAmelCase = numpy.mean(numpy.square(output - self.feedforward() ) )
print(F'''Iteration {iteration} Loss: {loss}''' )
def __lowerCAmelCase ( self , _lowerCAmelCase ):
_lowerCAmelCase = input_arr
_lowerCAmelCase = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
_lowerCAmelCase = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
_lowerCAmelCase = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : numpy.ndarray )->numpy.ndarray:
return 1 / (1 + numpy.exp(-value ))
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : numpy.ndarray )->numpy.ndarray:
return (value) * (1 - (value))
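# --- Illustrative check (not part of the original module) ---
# The two helpers above are `sigmoid` and its derivative in the original
# source. Note the derivative expects the *already activated* value, which is
# why back-propagation above passes layer outputs rather than pre-activations:
#
#   s = 1 / (1 + numpy.exp(-0.5))              # sigmoid(0.5) ~= 0.6225
#   assert numpy.isclose(s * (1 - s), 0.2350, atol=1e-4)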
def UpperCAmelCase__ ( )->int:
_lowerCAmelCase = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
_lowerCAmelCase = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
_lowerCAmelCase = TwoHiddenLayerNeuralNetwork(
input_array=_SCREAMING_SNAKE_CASE , output_array=_SCREAMING_SNAKE_CASE )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=_SCREAMING_SNAKE_CASE , iterations=1_0 , give_loss=_SCREAMING_SNAKE_CASE )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
| 664 | 0 |
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str , **_SCREAMING_SNAKE_CASE : Optional[int] )->int:
_lowerCAmelCase = AutoConfig.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
_lowerCAmelCase = AutoModelForSeqaSeqLM.from_config(__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
AutoTokenizer.from_pretrained(__UpperCamelCase ).save_pretrained(__UpperCamelCase )
return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
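# --- Illustrative usage sketch (not part of the original script) ---
# Through fire, the function above becomes a CLI that takes a config name and
# a save directory plus arbitrary config overrides. The invocation below is a
# hypothetical example (script name and the override flag are made up):
#
#   python save_random_init.py t5-small ./tiny-t5 --d_model=64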
| 711 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase_ = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["LayoutXLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
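# --- Illustrative sketch (not part of the original module) ---
# The `_LazyModule` replacement above defers heavy imports until attribute
# access. A minimal module-level `__getattr__` (PEP 562) achieving the same
# idea would look like:
#
#   import importlib
#
#   def __getattr__(name):
#       if name == "LayoutXLMProcessor":
#           module = importlib.import_module(".processing_layoutxlm", __name__)
#           return module.LayoutXLMProcessor
#       raise AttributeError(name)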
| 664 | 0 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
UpperCAmelCase_ = get_tests_dir("fixtures")
class UpperCAmelCase ( unittest.TestCase ):
def __lowerCAmelCase ( self ):
_lowerCAmelCase = mock.Mock()
_lowerCAmelCase = 500
_lowerCAmelCase = {}
_lowerCAmelCase = HTTPError
_lowerCAmelCase = {}
# Download this model to make sure it's in the cache.
_lowerCAmelCase = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=_lowerCamelCase ) as mock_head:
_lowerCAmelCase = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
        # This checks that we did call the fake head request
mock_head.assert_called()
def __lowerCAmelCase ( self ):
_lowerCAmelCase = WavaVecaFeatureExtractor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json''' )
@is_staging_test
class UpperCAmelCase ( unittest.TestCase ):
@classmethod
def __lowerCAmelCase ( cls ):
_lowerCAmelCase = TOKEN
HfFolder.save_token(_lowerCamelCase )
@classmethod
def __lowerCAmelCase ( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-feature-extractor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-feature-extractor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-feature-extractor''' )
except HTTPError:
pass
def __lowerCAmelCase ( self ):
_lowerCAmelCase = WavaVecaFeatureExtractor.from_pretrained(_lowerCamelCase )
feature_extractor.push_to_hub('''test-feature-extractor''' , use_auth_token=self._token )
_lowerCAmelCase = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-feature-extractor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_lowerCamelCase , repo_id='''test-feature-extractor''' , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
_lowerCAmelCase = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = WavaVecaFeatureExtractor.from_pretrained(_lowerCamelCase )
feature_extractor.push_to_hub('''valid_org/test-feature-extractor''' , use_auth_token=self._token )
_lowerCAmelCase = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-feature-extractor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_lowerCamelCase , repo_id='''valid_org/test-feature-extractor-org''' , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
_lowerCAmelCase = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor-org''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
def __lowerCAmelCase ( self ):
CustomFeatureExtractor.register_for_auto_class()
_lowerCAmelCase = CustomFeatureExtractor.from_pretrained(_lowerCamelCase )
feature_extractor.push_to_hub('''test-dynamic-feature-extractor''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor'''} , )
_lowerCAmelCase = AutoFeatureExtractor.from_pretrained(
F'''{USER}/test-dynamic-feature-extractor''' , trust_remote_code=_lowerCamelCase )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , '''CustomFeatureExtractor''' )
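# --- Illustrative sketch (not part of the original tests) ---
# The round trip exercised above is, stripped of assertions (`local_dir`,
# `token`, and `user` are placeholders):
#
#   extractor = WavaVecaFeatureExtractor.from_pretrained(local_dir)
#   extractor.push_to_hub("test-feature-extractor", use_auth_token=token)
#   reloaded = WavaVecaFeatureExtractor.from_pretrained(f"{user}/test-feature-extractor")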
| 712 |
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def UpperCAmelCase__ ( *_SCREAMING_SNAKE_CASE : Tuple )->List[Any]:
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = list(_SCREAMING_SNAKE_CASE )
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
_lowerCAmelCase = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Exception )->bool:
_lowerCAmelCase = [
'''CUDA out of memory.''', # CUDA OOM
'''cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.''', # CUDNN SNAFU
'''DefaultCPUAllocator: can\'t allocate memory''', # CPU OOM
]
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(exception.args ) == 1:
    return any(err in exception.args[0] for err in _lowerCAmelCase )
return False
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : callable = None , _SCREAMING_SNAKE_CASE : int = 1_2_8 )->Optional[int]:
if function is None:
return functools.partial(_SCREAMING_SNAKE_CASE , starting_batch_size=_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = starting_batch_size
def decorator(*_SCREAMING_SNAKE_CASE : Optional[int] , **_SCREAMING_SNAKE_CASE : Optional[Any] ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
_lowerCAmelCase = list(inspect.signature(_SCREAMING_SNAKE_CASE ).parameters.keys() )
# Guard against user error
if len(_SCREAMING_SNAKE_CASE ) < (len(_SCREAMING_SNAKE_CASE ) + 1):
_lowerCAmelCase = ''', '''.join([f'''{arg}={value}''' for arg, value in zip(params[1:] , args[1:] )] )
raise TypeError(
f'''Batch size was passed into `{function.__name__}` as the first argument when called.'''
f'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''' )
while True:
if batch_size == 0:
raise RuntimeError('''No executable batch size found, reached zero.''' )
try:
return function(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
except Exception as e:
if should_reduce_batch_size(_SCREAMING_SNAKE_CASE ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator
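# --- Illustrative usage sketch (not part of the original module) ---
# The decorator above is `find_executable_batch_size` in the upstream
# accelerate library: it halves the batch size after every out-of-memory
# failure until the wrapped function succeeds. Typical use:
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def training_loop(batch_size):
#       ...  # the first positional argument receives the current batch size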
| 664 | 0 |
from __future__ import annotations
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : int | float | str , _SCREAMING_SNAKE_CASE : int | float | str )->list[str]:
if nth_term == "":
return [""]
_lowerCAmelCase = int(__snake_case )
_lowerCAmelCase = int(__snake_case )
_lowerCAmelCase = []
for temp in range(int(__snake_case ) ):
series.append(f'''1 / {pow(temp + 1 , int(__snake_case ) )}''' if series else '''1''' )
return series
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ = int(input("Enter the last number (nth term) of the P-Series"))
UpperCAmelCase_ = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
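# --- Illustrative example (not part of the original script) ---
# For nth_term=5 and power=2 the function above yields the first five terms
# of the P-series with p=2:
#
#   p_series(5, 2) -> ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']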
| 713 |
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=2 , _lowerCAmelCase=8 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=99 , _lowerCAmelCase=16 , _lowerCAmelCase=5 , _lowerCAmelCase=2 , _lowerCAmelCase=36 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=512 , _lowerCAmelCase=16 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , ):
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = seq_length
_lowerCAmelCase = is_training
_lowerCAmelCase = use_input_mask
_lowerCAmelCase = use_token_type_ids
_lowerCAmelCase = use_labels
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = type_vocab_size
_lowerCAmelCase = type_sequence_label_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = num_labels
_lowerCAmelCase = num_choices
_lowerCAmelCase = scope
def __lowerCAmelCase ( self ):
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase = None
if self.use_input_mask:
_lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase = None
if self.use_token_type_ids:
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
if self.use_labels:
_lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self ):
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.get_config()
_lowerCAmelCase = 300
return config
def __lowerCAmelCase ( self ):
        ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) = self.prepare_config_and_inputs()
_lowerCAmelCase = True
_lowerCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = MraModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
_lowerCAmelCase = model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
_lowerCAmelCase = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
_lowerCAmelCase = True
_lowerCAmelCase = MraModel(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , )
_lowerCAmelCase = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , )
_lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = MraForMaskedLM(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = MraForQuestionAnswering(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , start_positions=_lowerCAmelCase , end_positions=_lowerCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = MraForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = MraForTokenClassification(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = self.num_choices
_lowerCAmelCase = MraForMultipleChoice(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.prepare_config_and_inputs()
        ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) = config_and_inputs
_lowerCAmelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( snake_case_ ,unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = ()
def __lowerCAmelCase ( self ):
_lowerCAmelCase = MraModelTester(self )
_lowerCAmelCase = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=37 )
def __lowerCAmelCase ( self ):
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowerCAmelCase = type
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowerCAmelCase )
@slow
def __lowerCAmelCase ( self ):
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase = MraModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@unittest.skip(reason='''MRA does not output attentions''' )
def __lowerCAmelCase ( self ):
return
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self ):
_lowerCAmelCase = MraModel.from_pretrained('''uw-madison/mra-base-512-4''' )
_lowerCAmelCase = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
_lowerCAmelCase = model(_lowerCAmelCase )[0]
_lowerCAmelCase = torch.Size((1, 256, 768) )
self.assertEqual(output.shape , _lowerCAmelCase )
_lowerCAmelCase = torch.tensor(
[[[-0.0_140, 0.0_830, -0.0_381], [0.1_546, 0.1_402, 0.0_220], [0.1_162, 0.0_851, 0.0_165]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCAmelCase , atol=1E-4 ) )
@slow
def __lowerCAmelCase ( self ):
_lowerCAmelCase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''' )
_lowerCAmelCase = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
_lowerCAmelCase = model(_lowerCAmelCase )[0]
_lowerCAmelCase = 50_265
_lowerCAmelCase = torch.Size((1, 256, vocab_size) )
self.assertEqual(output.shape , _lowerCAmelCase )
_lowerCAmelCase = torch.tensor(
[[[9.2_595, -3.6_038, 11.8_819], [9.3_869, -3.2_693, 11.0_956], [11.8_524, -3.4_938, 13.1_210]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCAmelCase , atol=1E-4 ) )
@slow
def __lowerCAmelCase ( self ):
_lowerCAmelCase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''' )
_lowerCAmelCase = torch.arange(4_096 ).unsqueeze(0 )
with torch.no_grad():
_lowerCAmelCase = model(_lowerCAmelCase )[0]
_lowerCAmelCase = 50_265
_lowerCAmelCase = torch.Size((1, 4_096, vocab_size) )
self.assertEqual(output.shape , _lowerCAmelCase )
_lowerCAmelCase = torch.tensor(
[[[5.4_789, -2.3_564, 7.5_064], [7.9_067, -1.3_369, 9.9_668], [9.0_712, -1.8_106, 7.0_380]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCAmelCase , atol=1E-4 ) )
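# --- Illustrative note (not part of the original tests) ---
# Each integration test above follows the same regression recipe: load a
# checkpoint, run a deterministic input, and compare a small logits slice
# against pinned values within a tolerance:
#
#   torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4)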
| 664 | 0 |
class UpperCAmelCase ( __lowercase ):
pass
class UpperCAmelCase ( __lowercase ):
pass
class UpperCAmelCase :
def __init__( self ):
_lowerCAmelCase = [
[],
[],
[],
]
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase ):
try:
if len(self.queues[priority] ) >= 100:
raise OverflowError('''Maximum queue size is 100''' )
self.queues[priority].append(__A )
except IndexError:
raise ValueError('''Valid priorities are 0, 1, and 2''' )
def __lowerCAmelCase ( self ):
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError('''All queues are empty''' )
def __str__( self ):
return "\n".join(F'''Priority {i}: {q}''' for i, q in enumerate(self.queues ) )
class UpperCAmelCase :
def __init__( self ):
_lowerCAmelCase = []
def __lowerCAmelCase ( self , _lowerCAmelCase ):
if len(self.queue ) == 100:
raise OverFlowError('''Maximum queue size is 100''' )
self.queue.append(__A )
def __lowerCAmelCase ( self ):
if not self.queue:
raise UnderFlowError('''The queue is empty''' )
else:
_lowerCAmelCase = min(self.queue )
self.queue.remove(__A )
return data
def __str__( self ):
return str(self.queue )
def UpperCAmelCase__ ( )->Union[str, Any]:
_lowerCAmelCase = FixedPriorityQueue()
fpq.enqueue(0 , 1_0 )
fpq.enqueue(1 , 7_0 )
fpq.enqueue(0 , 1_0_0 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 6_4 )
fpq.enqueue(0 , 1_2_8 )
    print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
    print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def UpperCAmelCase__ ( )->str:
_lowerCAmelCase = ElementPriorityQueue()
epq.enqueue(1_0 )
epq.enqueue(7_0 )
epq.enqueue(1_0_0 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(6_4 )
epq.enqueue(1_2_8 )
    print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
    print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 714 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
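# --- Illustrative sketch (not part of the original module) ---
# The guard above swaps in dummy objects when torch/transformers are missing,
# so importing this package never hard-fails. The same pattern in miniature:
#
#   try:
#       import torch  # noqa: F401
#   except ImportError:
#       ShapEPipeline = None  # stand-in instead of the real pipeline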
| 664 | 0 |
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
class UpperCAmelCase ( UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ = ['''input_features''', '''attention_mask''']
def __init__( self , _lowerCAmelCase=80 , _lowerCAmelCase=16_000 , _lowerCAmelCase=0.0 , _lowerCAmelCase=10 , _lowerCAmelCase=25 , _lowerCAmelCase="hamming_window" , _lowerCAmelCase=32_768.0 , _lowerCAmelCase=0.97 , _lowerCAmelCase=1.0 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=False , **_lowerCAmelCase , ):
super().__init__(feature_size=__A , sampling_rate=__A , padding_value=__A , **__A )
_lowerCAmelCase = feature_size
_lowerCAmelCase = sampling_rate
_lowerCAmelCase = padding_value
_lowerCAmelCase = hop_length
_lowerCAmelCase = win_length
_lowerCAmelCase = frame_signal_scale
_lowerCAmelCase = preemphasis_coeff
_lowerCAmelCase = mel_floor
_lowerCAmelCase = normalize_means
_lowerCAmelCase = normalize_vars
_lowerCAmelCase = win_function
_lowerCAmelCase = return_attention_mask
_lowerCAmelCase = win_length * sampling_rate // 1_000
_lowerCAmelCase = hop_length * sampling_rate // 1_000
_lowerCAmelCase = optimal_fft_length(self.sample_size )
_lowerCAmelCase = (self.n_fft // 2) + 1
def __lowerCAmelCase ( self , _lowerCAmelCase ):
if self.win_function == "hamming_window":
_lowerCAmelCase = window_function(window_length=self.sample_size , name=self.win_function , periodic=__A )
else:
_lowerCAmelCase = window_function(window_length=self.sample_size , name=self.win_function )
_lowerCAmelCase = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
_lowerCAmelCase = spectrogram(
one_waveform * self.frame_signal_scale , window=__A , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=__A , preemphasis=self.preemphasis_coeff , mel_filters=__A , mel_floor=self.mel_floor , log_mel='''log''' , )
return msfc_features.T
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
if self.normalize_means:
_lowerCAmelCase = x[:input_length].mean(axis=0 )
_lowerCAmelCase = np.subtract(__A , __A )
if self.normalize_vars:
_lowerCAmelCase = x[:input_length].std(axis=0 )
_lowerCAmelCase = np.divide(__A , __A )
if input_length < x.shape[0]:
_lowerCAmelCase = padding_value
# make sure array is in float32
_lowerCAmelCase = x.astype(np.floataa )
return x
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
_lowerCAmelCase = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(__A , __A , self.padding_value ) for x, n in zip(__A , __A )]
def __call__( self , _lowerCAmelCase , _lowerCAmelCase = False , _lowerCAmelCase = None , _lowerCAmelCase = False , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , **_lowerCAmelCase , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'''It is strongly recommended to pass the ``sampling_rate`` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
_lowerCAmelCase = isinstance(__A , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
_lowerCAmelCase = is_batched_numpy or (
isinstance(__A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_lowerCAmelCase = [np.asarray(__A , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__A , np.ndarray ):
_lowerCAmelCase = np.asarray(__A , dtype=np.floataa )
elif isinstance(__A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_lowerCAmelCase = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_lowerCAmelCase = [raw_speech]
# extract fbank features
_lowerCAmelCase = [self._extract_mfsc_features(__A ) for one_waveform in raw_speech]
# convert into correct format for padding
_lowerCAmelCase = BatchFeature({'''input_features''': features} )
_lowerCAmelCase = self.pad(
__A , padding=__A , max_length=__A , truncation=__A , pad_to_multiple_of=__A , return_attention_mask=__A , **__A , )
# make sure list is in array format
_lowerCAmelCase = padded_inputs.get('''input_features''' )
if isinstance(input_features[0] , __A ):
_lowerCAmelCase = [np.asarray(__A , dtype=np.floataa ) for feature in input_features]
_lowerCAmelCase = padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
_lowerCAmelCase = [np.asarray(__A , dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
_lowerCAmelCase = (
np.array(__A , dtype=np.intaa )
if self._get_padding_strategies(__A , max_length=__A ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
_lowerCAmelCase = self.normalize(
padded_inputs['''input_features'''] , attention_mask=__A )
if return_tensors is not None:
_lowerCAmelCase = padded_inputs.convert_to_tensors(__A )
return padded_inputs
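# --- Hedged example (added; not part of the original file) ---
# The normalize step above is per-utterance cepstral mean/variance normalization
# (CMVN) computed over the unpadded frames only. A minimal standalone sketch in
# plain NumPy; the epsilon guard is an assumption added here, not taken from the
# extractor, to avoid division by zero on constant channels:
import numpy as np
def cmvn(features, input_length, padding_value=0.0):
    mean = features[:input_length].mean(axis=0)
    std = features[:input_length].std(axis=0)
    normalized = (features - mean) / (std + 1e-10)
    normalized[input_length:] = padding_value  # reset the padded frames
    return normalized.astype(np.float32)
frames = np.pad(np.random.randn(12, 13), ((0, 8), (0, 0)))  # 12 real frames padded to 20
print(cmvn(frames, input_length=12).shape)  # (20, 13)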
| 715 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class UpperCAmelCase ( unittest.TestCase ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=7 , _lowerCAmelCase=3 , _lowerCAmelCase=10 , _lowerCAmelCase=18 , _lowerCAmelCase=30 , _lowerCAmelCase=400 , _lowerCAmelCase=True , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase=[0.5, 0.5, 0.5] , _lowerCAmelCase=[0.5, 0.5, 0.5] , _lowerCAmelCase=None , ):
_lowerCAmelCase = size if size is not None else {'''shortest_edge''': 18}
_lowerCAmelCase = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = num_frames
_lowerCAmelCase = image_size
_lowerCAmelCase = min_resolution
_lowerCAmelCase = max_resolution
_lowerCAmelCase = do_resize
_lowerCAmelCase = size
_lowerCAmelCase = do_normalize
_lowerCAmelCase = image_mean
_lowerCAmelCase = image_std
_lowerCAmelCase = crop_size
def __lowerCAmelCase ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class UpperCAmelCase ( snake_case_ ,unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = VivitImageProcessor if is_vision_available() else None
def __lowerCAmelCase ( self ):
_lowerCAmelCase = VivitImageProcessingTester(self )
@property
def __lowerCAmelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(_lowerCAmelCase , '''image_std''' ) )
self.assertTrue(hasattr(_lowerCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(_lowerCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(_lowerCAmelCase , '''do_center_crop''' ) )
self.assertTrue(hasattr(_lowerCAmelCase , '''size''' ) )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
_lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def __lowerCAmelCase ( self ):
# Initialize image_processing
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL videos
_lowerCAmelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase )
for video in video_inputs:
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
self.assertIsInstance(video[0] , Image.Image )
# Test not batched input
_lowerCAmelCase = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_lowerCAmelCase = image_processing(_lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __lowerCAmelCase ( self ):
# Initialize image_processing
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCAmelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , numpify=_lowerCAmelCase )
for video in video_inputs:
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
self.assertIsInstance(video[0] , np.ndarray )
# Test not batched input
_lowerCAmelCase = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_lowerCAmelCase = image_processing(_lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __lowerCAmelCase ( self ):
# Initialize image_processing
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCAmelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase )
for video in video_inputs:
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
self.assertIsInstance(video[0] , torch.Tensor )
# Test not batched input
_lowerCAmelCase = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_lowerCAmelCase = image_processing(_lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
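# --- Hedged usage sketch (added; mirrors the tester defaults above) ---
# One synthetic 10-frame video pushed through the processor; the output shape is
# (batch, frames, channels, height, width). Assumes transformers is installed.
import numpy as np
from transformers import VivitImageProcessor
processor = VivitImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})
video = [np.random.randint(0, 256, (24, 32, 3), dtype=np.uint8) for _ in range(10)]
pixel_values = processor(video, return_tensors="np").pixel_values
print(pixel_values.shape)  # (1, 10, 3, 18, 18)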
| 664 | 0 |
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE : List[Any] )->Optional[Any]:
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def __SCREAMING_SNAKE_CASE ( )->str:
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def __SCREAMING_SNAKE_CASE ( )->Dict:
_lowerCAmelCase = '''mock-s3-bucket'''
_lowerCAmelCase = f'''s3://{mock_bucket}'''
_lowerCAmelCase = extract_path_from_uri(_UpperCAmelCase )
assert dataset_path.startswith('''s3://''' ) is False
_lowerCAmelCase = '''./local/path'''
_lowerCAmelCase = extract_path_from_uri(_UpperCAmelCase )
assert dataset_path == new_dataset_path
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE : Optional[Any] )->Union[str, Any]:
_lowerCAmelCase = is_remote_filesystem(_UpperCAmelCase )
assert is_remote is True
_lowerCAmelCase = fsspec.filesystem('''file''' )
_lowerCAmelCase = is_remote_filesystem(_UpperCAmelCase )
assert is_remote is False
@pytest.mark.parametrize('''compression_fs_class''' , _UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Tuple )->Any:
_lowerCAmelCase = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_file, '''bz2''': bza_file, '''lz4''': lza_file}
_lowerCAmelCase = input_paths[compression_fs_class.protocol]
if input_path is None:
_lowerCAmelCase = f'''for \'{compression_fs_class.protocol}\' compression protocol, '''
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(_UpperCAmelCase )
_lowerCAmelCase = fsspec.filesystem(compression_fs_class.protocol , fo=_UpperCAmelCase )
assert isinstance(_UpperCAmelCase , _UpperCAmelCase )
_lowerCAmelCase = os.path.basename(_UpperCAmelCase )
_lowerCAmelCase = expected_filename[: expected_filename.rindex('''.''' )]
assert fs.glob('''*''' ) == [expected_filename]
with fs.open(_UpperCAmelCase , '''r''' , encoding='''utf-8''' ) as f, open(_UpperCAmelCase , encoding='''utf-8''' ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize('''protocol''' , ['''zip''', '''gzip'''] )
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Union[str, Any] )->int:
_lowerCAmelCase = {'''zip''': zip_jsonl_path, '''gzip''': jsonl_gz_path}
_lowerCAmelCase = compressed_file_paths[protocol]
_lowerCAmelCase = '''dataset.jsonl'''
_lowerCAmelCase = f'''{protocol}://{member_file_path}::{compressed_file_path}'''
_lowerCAmelCase , *_lowerCAmelCase = fsspec.get_fs_token_paths(_UpperCAmelCase )
assert fs.isfile(_UpperCAmelCase )
assert not fs.isfile('''non_existing_''' + member_file_path )
@pytest.mark.integration
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Dict )->Dict:
_lowerCAmelCase = hf_api.dataset_info(_UpperCAmelCase , token=_UpperCAmelCase )
_lowerCAmelCase = HfFileSystem(repo_info=_UpperCAmelCase , token=_UpperCAmelCase )
assert sorted(hffs.glob('''*''' ) ) == [".gitattributes", "data"]
assert hffs.isdir('''data''' )
assert hffs.isfile('''.gitattributes''' ) and hffs.isfile('''data/text_data.txt''' )
with open(_UpperCAmelCase ) as f:
assert hffs.open('''data/text_data.txt''' , '''r''' ).read() == f.read()
def __SCREAMING_SNAKE_CASE ( )->Optional[int]:
_lowerCAmelCase = '''bz2'''
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(_UpperCAmelCase , _UpperCAmelCase , clobber=_UpperCAmelCase )
with pytest.warns(_UpperCAmelCase ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(_UpperCAmelCase ) == 1
assert (
str(warning_info[0].message )
== f'''A filesystem protocol was already set for {protocol} and will be overwritten.'''
)
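# --- Hedged sketch (added; plain fsspec, no datasets-registered filesystems) ---
# fsspec can also decompress transparently at open() time, the same idea the
# compression filesystems exercised above are built on.
import gzip
import os
import tempfile
import fsspec
with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "dataset.jsonl.gz")
    with gzip.open(path, "wt", encoding="utf-8") as f:
        f.write('{"text": "hello"}\n')
    with fsspec.open(path, "rt", compression="gzip", encoding="utf-8") as f:
        print(f.read())  # {"text": "hello"}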
| 716 |
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
UpperCAmelCase_ = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
UpperCAmelCase_ = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
UpperCAmelCase_ = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[Any] )->Optional[Any]:
def remove_articles(_SCREAMING_SNAKE_CASE : List[str] ):
_lowerCAmelCase = re.compile(r'''\b(a|an|the)\b''' , re.UNICODE )
return re.sub(_SCREAMING_SNAKE_CASE , ''' ''' , _SCREAMING_SNAKE_CASE )
def white_space_fix(_SCREAMING_SNAKE_CASE : List[Any] ):
return " ".join(text.split() )
def remove_punc(_SCREAMING_SNAKE_CASE : Optional[Any] ):
_lowerCAmelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_SCREAMING_SNAKE_CASE : Optional[int] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_SCREAMING_SNAKE_CASE ) ) ) )
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[Any] )->Any:
return int(normalize_answer(_SCREAMING_SNAKE_CASE ) == normalize_answer(_SCREAMING_SNAKE_CASE ) )
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : str )->int:
_lowerCAmelCase = [any(compute_exact(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for ref in refs ) for pred, refs in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )]
return (sum(_SCREAMING_SNAKE_CASE ) / len(_SCREAMING_SNAKE_CASE )) * 1_0_0
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[str] )->Optional[int]:
_lowerCAmelCase = [rgram for rgrams in rgramslist for rgram in rgrams]
_lowerCAmelCase = Counter(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = Counter(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = Counter()
for sgram, scount in sgramcounter.items():
_lowerCAmelCase = scount * numref
_lowerCAmelCase = Counter(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = Counter()
for cgram, ccount in cgramcounter.items():
_lowerCAmelCase = ccount * numref
# KEEP
_lowerCAmelCase = sgramcounter_rep & cgramcounter_rep
_lowerCAmelCase = keepgramcounter_rep & rgramcounter
_lowerCAmelCase = sgramcounter_rep & rgramcounter
_lowerCAmelCase = 0
_lowerCAmelCase = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_lowerCAmelCase = 1
_lowerCAmelCase = 1
if len(_SCREAMING_SNAKE_CASE ) > 0:
_lowerCAmelCase = keeptmpscorea / len(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
_lowerCAmelCase = keeptmpscorea / sum(keepgramcounterall_rep.values() )
_lowerCAmelCase = 0
if keepscore_precision > 0 or keepscore_recall > 0:
_lowerCAmelCase = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
_lowerCAmelCase = sgramcounter_rep - cgramcounter_rep
_lowerCAmelCase = delgramcounter_rep - rgramcounter
_lowerCAmelCase = sgramcounter_rep - rgramcounter
_lowerCAmelCase = 0
_lowerCAmelCase = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_lowerCAmelCase = 1
if len(_SCREAMING_SNAKE_CASE ) > 0:
_lowerCAmelCase = deltmpscorea / len(_SCREAMING_SNAKE_CASE )
# ADDITION
_lowerCAmelCase = set(_SCREAMING_SNAKE_CASE ) - set(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = set(_SCREAMING_SNAKE_CASE ) & set(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = set(_SCREAMING_SNAKE_CASE ) - set(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_lowerCAmelCase = 1
_lowerCAmelCase = 1
if len(_SCREAMING_SNAKE_CASE ) > 0:
_lowerCAmelCase = addtmpscore / len(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
_lowerCAmelCase = addtmpscore / len(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = 0
if addscore_precision > 0 or addscore_recall > 0:
_lowerCAmelCase = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : str )->List[Any]:
_lowerCAmelCase = len(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = ssent.split(''' ''' )
_lowerCAmelCase = csent.split(''' ''' )
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
for rsent in rsents:
_lowerCAmelCase = rsent.split(''' ''' )
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
ragramslist.append(_SCREAMING_SNAKE_CASE )
for i in range(0 , len(_SCREAMING_SNAKE_CASE ) - 1 ):
if i < len(_SCREAMING_SNAKE_CASE ) - 1:
_lowerCAmelCase = ragrams[i] + ''' ''' + ragrams[i + 1]
ragrams.append(_SCREAMING_SNAKE_CASE )
if i < len(_SCREAMING_SNAKE_CASE ) - 2:
_lowerCAmelCase = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2]
ragrams.append(_SCREAMING_SNAKE_CASE )
if i < len(_SCREAMING_SNAKE_CASE ) - 3:
_lowerCAmelCase = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] + ''' ''' + ragrams[i + 3]
ragrams.append(_SCREAMING_SNAKE_CASE )
ragramslist.append(_SCREAMING_SNAKE_CASE )
ragramslist.append(_SCREAMING_SNAKE_CASE )
ragramslist.append(_SCREAMING_SNAKE_CASE )
for i in range(0 , len(_SCREAMING_SNAKE_CASE ) - 1 ):
if i < len(_SCREAMING_SNAKE_CASE ) - 1:
_lowerCAmelCase = sagrams[i] + ''' ''' + sagrams[i + 1]
sagrams.append(_SCREAMING_SNAKE_CASE )
if i < len(_SCREAMING_SNAKE_CASE ) - 2:
_lowerCAmelCase = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2]
sagrams.append(_SCREAMING_SNAKE_CASE )
if i < len(_SCREAMING_SNAKE_CASE ) - 3:
_lowerCAmelCase = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] + ''' ''' + sagrams[i + 3]
sagrams.append(_SCREAMING_SNAKE_CASE )
for i in range(0 , len(_SCREAMING_SNAKE_CASE ) - 1 ):
if i < len(_SCREAMING_SNAKE_CASE ) - 1:
_lowerCAmelCase = cagrams[i] + ''' ''' + cagrams[i + 1]
cagrams.append(_SCREAMING_SNAKE_CASE )
if i < len(_SCREAMING_SNAKE_CASE ) - 2:
_lowerCAmelCase = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2]
cagrams.append(_SCREAMING_SNAKE_CASE )
if i < len(_SCREAMING_SNAKE_CASE ) - 3:
_lowerCAmelCase = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] + ''' ''' + cagrams[i + 3]
cagrams.append(_SCREAMING_SNAKE_CASE )
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = SARIngram(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = SARIngram(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = SARIngram(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = SARIngram(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_lowerCAmelCase = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
_lowerCAmelCase = sum([delascore, delascore, delascore, delascore] ) / 4
_lowerCAmelCase = sum([addascore, addascore, addascore, addascore] ) / 4
_lowerCAmelCase = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : bool = True , _SCREAMING_SNAKE_CASE : str = "13a" , _SCREAMING_SNAKE_CASE : bool = True )->int:
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though the Wiki-Auto and TURK datasets
    # do not require normalization, we do it for consistency.
# Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
# [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
if lowercase:
_lowerCAmelCase = sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
_lowerCAmelCase = sacrebleu.metrics.bleu._get_tokenizer(_SCREAMING_SNAKE_CASE )()(_SCREAMING_SNAKE_CASE )
else:
_lowerCAmelCase = sacrebleu.TOKENIZERS[tokenizer]()(_SCREAMING_SNAKE_CASE )
elif tokenizer == "moses":
_lowerCAmelCase = sacremoses.MosesTokenizer().tokenize(_SCREAMING_SNAKE_CASE , return_str=_SCREAMING_SNAKE_CASE , escape=_SCREAMING_SNAKE_CASE )
elif tokenizer == "penn":
_lowerCAmelCase = sacremoses.MosesTokenizer().penn_tokenize(_SCREAMING_SNAKE_CASE , return_str=_SCREAMING_SNAKE_CASE )
else:
_lowerCAmelCase = sentence
if not return_str:
_lowerCAmelCase = normalized_sent.split()
return normalized_sent
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[str] )->str:
if not (len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE )):
raise ValueError('''Sources length must match predictions and references lengths.''' )
_lowerCAmelCase = 0
for src, pred, refs in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
sari_score += SARIsent(normalize(_SCREAMING_SNAKE_CASE ) , normalize(_SCREAMING_SNAKE_CASE ) , [normalize(_SCREAMING_SNAKE_CASE ) for sent in refs] )
_lowerCAmelCase = sari_score / len(_SCREAMING_SNAKE_CASE )
return 1_0_0 * sari_score
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Optional[Any]="exp" , _SCREAMING_SNAKE_CASE : Optional[int]=None , _SCREAMING_SNAKE_CASE : Optional[int]=False , _SCREAMING_SNAKE_CASE : str=False , _SCREAMING_SNAKE_CASE : int=False , )->str:
_lowerCAmelCase = len(references[0] )
if any(len(_SCREAMING_SNAKE_CASE ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
_lowerCAmelCase = [[refs[i] for refs in references] for i in range(_SCREAMING_SNAKE_CASE )]
_lowerCAmelCase = sacrebleu.corpus_bleu(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , smooth_method=_SCREAMING_SNAKE_CASE , smooth_value=_SCREAMING_SNAKE_CASE , force=_SCREAMING_SNAKE_CASE , lowercase=_SCREAMING_SNAKE_CASE , use_effective_order=_SCREAMING_SNAKE_CASE , )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class UpperCAmelCase ( datasets.Metric ):
def __lowerCAmelCase ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=[
'''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
'''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
'''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
'''https://github.com/mjpost/sacreBLEU''',
] , reference_urls=[
'''https://www.aclweb.org/anthology/Q16-1029.pdf''',
'''https://github.com/mjpost/sacreBLEU''',
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = {}
result.update({'''sari''': compute_sari(sources=_lowerCAmelCase , predictions=_lowerCAmelCase , references=_lowerCAmelCase )} )
result.update({'''sacrebleu''': compute_sacrebleu(predictions=_lowerCAmelCase , references=_lowerCAmelCase )} )
result.update({'''exact''': compute_em(predictions=_lowerCAmelCase , references=_lowerCAmelCase )} )
return result
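# --- Hedged sketch (added) of the `exact` component above ---
# SQuAD-style normalization (lowercase, strip punctuation and articles, squeeze
# whitespace) before comparing a prediction against its references.
import re
import string
def normalize_answer_sketch(text):
    text = text.lower()
    text = "".join(ch for ch in text if ch not in set(string.punctuation))
    text = re.sub(r"\b(a|an|the)\b", " ", text)
    return " ".join(text.split())
preds = ["About 95 species are currently known ."]
refs = [["About 95 species are currently known."]]
em = 100 * sum(
    any(normalize_answer_sketch(p) == normalize_answer_sketch(r) for r in rs)
    for p, rs in zip(preds, refs)
) / len(preds)
print(em)  # 100.0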
| 664 | 0 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None
CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")
def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0
    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1
    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data
    if count_nodes(root) != count_coins(root):
        raise ValueError('''The nodes number should be same as the number of coins''')
    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)
        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(result_moves, result_excess)
    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
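# --- Hedged usage example (added; uses the names defined above) ---
# The classic instance: the root holds all 3 coins, both leaves hold none,
# and one coin must travel down each edge.
example_root = TreeNode(3, TreeNode(0), TreeNode(0))
print(distribute_coins(example_root))  # 2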
| 717 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase_ = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["DeiTFeatureExtractor"]
UpperCAmelCase_ = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DeiTForImageClassification",
"DeiTForImageClassificationWithTeacher",
"DeiTForMaskedImageModeling",
"DeiTModel",
"DeiTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDeiTForImageClassification",
"TFDeiTForImageClassificationWithTeacher",
"TFDeiTForMaskedImageModeling",
"TFDeiTModel",
"TFDeiTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
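# --- Hedged sketch (added) of the lazy-import pattern this file relies on ---
# _LazyModule defers the heavy imports until an attribute is first touched; a
# minimal standalone analogue uses a module-level __getattr__ (PEP 562). The
# json example below is illustrative, not part of the DeiT module.
import importlib
_SKETCH_IMPORT_STRUCTURE = {"json": ["dumps", "loads"]}
def __getattr__(name):
    for module_name, symbols in _SKETCH_IMPORT_STRUCTURE.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")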
| 664 | 0 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class UpperCAmelCase ( snake_case_ ):
@slow
@require_torch
def __lowerCAmelCase ( self ):
_lowerCAmelCase = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' )
_lowerCAmelCase = BertTokenizer.from_pretrained('''bert-base-uncased''' )
_lowerCAmelCase = bertabert.config.encoder.vocab_size
_lowerCAmelCase = tokenizer.sep_token_id
_lowerCAmelCase = tokenizer.cls_token_id
_lowerCAmelCase = 128
_lowerCAmelCase = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' )
_lowerCAmelCase = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' )
_lowerCAmelCase = train_dataset.select(range(32 ) )
_lowerCAmelCase = val_dataset.select(range(16 ) )
_lowerCAmelCase = 4
def _map_to_encoder_decoder_inputs(_lowerCAmelCase ):
# Tokenizer will automatically set [BOS] <text> [EOS]
_lowerCAmelCase = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=__A , max_length=512 )
_lowerCAmelCase = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=__A , max_length=128 )
_lowerCAmelCase = inputs.input_ids
_lowerCAmelCase = inputs.attention_mask
_lowerCAmelCase = outputs.input_ids
_lowerCAmelCase = outputs.input_ids.copy()
_lowerCAmelCase = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels''']
]
_lowerCAmelCase = outputs.attention_mask
assert all(len(__A ) == 512 for x in inputs.input_ids )
assert all(len(__A ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(_lowerCAmelCase ):
_lowerCAmelCase = pred.label_ids
_lowerCAmelCase = pred.predictions
# all unnecessary tokens are removed
_lowerCAmelCase = tokenizer.batch_decode(__A , skip_special_tokens=__A )
_lowerCAmelCase = tokenizer.batch_decode(__A , skip_special_tokens=__A )
_lowerCAmelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(__A ) )] ) / len(__A )
return {"accuracy": accuracy}
# map train dataset
_lowerCAmelCase = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=__A , batch_size=__A , remove_columns=['''article''', '''highlights'''] , )
train_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
# same for validation dataset
_lowerCAmelCase = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=__A , batch_size=__A , remove_columns=['''article''', '''highlights'''] , )
val_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
_lowerCAmelCase = self.get_auto_remove_tmp_dir()
_lowerCAmelCase = SeqaSeqTrainingArguments(
output_dir=__A , per_device_train_batch_size=__A , per_device_eval_batch_size=__A , predict_with_generate=__A , evaluation_strategy='''steps''' , do_train=__A , do_eval=__A , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
_lowerCAmelCase = SeqaSeqTrainer(
model=__A , args=__A , compute_metrics=_compute_metrics , train_dataset=__A , eval_dataset=__A , tokenizer=__A , )
# start training
trainer.train()
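# --- Hedged inference sketch (added; same checkpoints as the test above) ---
# Generation needs the decoder start and pad ids set on the config first.
from transformers import BertTokenizer, EncoderDecoderModel
model = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model.config.decoder_start_token_id = tokenizer.cls_token_id
model.config.pad_token_id = tokenizer.pad_token_id
inputs = tokenizer("A short article to summarize.", return_tensors="pt")
summary_ids = model.generate(inputs.input_ids, max_length=16)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))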
| 718 |
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n
    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count
    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
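# --- Hedged cross-check (added; assumes networkx is installed) ---
# The same graph's articulation points via the library routine; removing
# vertex 2, 3, or 5 disconnects the graph, so compute_ap prints 2, 3 and 5.
import networkx as nx
G = nx.Graph(data)
print(sorted(nx.articulation_points(G)))  # [2, 3, 5]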
| 664 | 0 |
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class UpperCAmelCase :
def __init__( self , _lowerCAmelCase , ):
_lowerCAmelCase = parent
_lowerCAmelCase = 13
_lowerCAmelCase = 7
_lowerCAmelCase = True
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = True
_lowerCAmelCase = 99
_lowerCAmelCase = 32
_lowerCAmelCase = 2
_lowerCAmelCase = 4
_lowerCAmelCase = 37
_lowerCAmelCase = """gelu"""
_lowerCAmelCase = 0.1
_lowerCAmelCase = 0.1
_lowerCAmelCase = 512
_lowerCAmelCase = 16
_lowerCAmelCase = 2
_lowerCAmelCase = 0.02
_lowerCAmelCase = 3
_lowerCAmelCase = 4
_lowerCAmelCase = None
def __lowerCAmelCase ( self ):
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase = None
if self.use_input_mask:
_lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
if self.use_labels:
_lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = TFDistilBertModel(config=_lowercase )
_lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
_lowerCAmelCase = model(_lowercase )
_lowerCAmelCase = [input_ids, input_mask]
_lowerCAmelCase = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = TFDistilBertForMaskedLM(config=_lowercase )
_lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
_lowerCAmelCase = model(_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = TFDistilBertForQuestionAnswering(config=_lowercase )
_lowerCAmelCase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
}
_lowerCAmelCase = model(_lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = TFDistilBertForSequenceClassification(_lowercase )
_lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
_lowerCAmelCase = model(_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = self.num_choices
_lowerCAmelCase = TFDistilBertForMultipleChoice(_lowercase )
_lowerCAmelCase = tf.tile(tf.expand_dims(_lowercase , 1 ) , (1, self.num_choices, 1) )
_lowerCAmelCase = tf.tile(tf.expand_dims(_lowercase , 1 ) , (1, self.num_choices, 1) )
_lowerCAmelCase = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
}
_lowerCAmelCase = model(_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = TFDistilBertForTokenClassification(_lowercase )
_lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
_lowerCAmelCase = model(_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.prepare_config_and_inputs()
(_lowerCAmelCase) = config_and_inputs
_lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase ( __snake_case ,__snake_case ,unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
SCREAMING_SNAKE_CASE__ = (
{
'''feature-extraction''': TFDistilBertModel,
'''fill-mask''': TFDistilBertForMaskedLM,
'''question-answering''': TFDistilBertForQuestionAnswering,
'''text-classification''': TFDistilBertForSequenceClassification,
'''token-classification''': TFDistilBertForTokenClassification,
'''zero-shot''': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
def __lowerCAmelCase ( self ):
_lowerCAmelCase = TFDistilBertModelTester(self )
_lowerCAmelCase = ConfigTester(self , config_class=_lowercase , dim=37 )
def __lowerCAmelCase ( self ):
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*_lowercase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*_lowercase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*_lowercase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*_lowercase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*_lowercase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*_lowercase )
@slow
def __lowerCAmelCase ( self ):
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
_lowerCAmelCase = TFDistilBertModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
@require_tf
class UpperCAmelCase ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self ):
_lowerCAmelCase = TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
_lowerCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
_lowerCAmelCase = model(_lowercase )[0]
_lowerCAmelCase = [1, 6, 768]
self.assertEqual(output.shape , _lowercase )
_lowerCAmelCase = tf.constant(
[
[
[0.19_261_885, -0.13_732_955, 0.4_119_799],
[0.22_150_156, -0.07_422_661, 0.39_037_204],
[0.22_756_018, -0.0_896_414, 0.3_701_467],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _lowercase , atol=1E-4 )
| 719 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class UpperCAmelCase ( snake_case_ ):
def __lowerCAmelCase ( self ):
_lowerCAmelCase = SMALL_MODEL_IDENTIFIER
_lowerCAmelCase = '''pt'''
_lowerCAmelCase = '''tf'''
def __lowerCAmelCase ( self , _lowerCAmelCase ):
_lowerCAmelCase = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(_lowerCAmelCase )
def __lowerCAmelCase ( self , _lowerCAmelCase ):
_lowerCAmelCase = TFAutoModel.from_pretrained(self.test_model , from_pt=_lowerCAmelCase )
model_tf.save_pretrained(_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = '''mock_framework'''
# Framework provided - return whatever the user provides
_lowerCAmelCase = FeaturesManager.determine_framework(self.test_model , _lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_lowerCAmelCase )
_lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_lowerCAmelCase )
_lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
def __lowerCAmelCase ( self ):
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_lowerCAmelCase )
_lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_lowerCAmelCase )
_lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(_lowerCAmelCase ):
_lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase )
with patch('''transformers.onnx.features.is_tf_available''' , _lowerCAmelCase ):
_lowerCAmelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowerCAmelCase , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
_lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase )
with patch('''transformers.onnx.features.is_torch_available''' , _lowerCAmelCase ):
_lowerCAmelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowerCAmelCase , self.framework_tf )
# Both in environment -> use PyTorch
_lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase )
_lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase )
with patch('''transformers.onnx.features.is_tf_available''' , _lowerCAmelCase ), patch(
'''transformers.onnx.features.is_torch_available''' , _lowerCAmelCase ):
_lowerCAmelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowerCAmelCase , self.framework_pt )
# Both not in environment -> raise error
_lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase )
_lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase )
with patch('''transformers.onnx.features.is_tf_available''' , _lowerCAmelCase ), patch(
'''transformers.onnx.features.is_torch_available''' , _lowerCAmelCase ):
with self.assertRaises(_lowerCAmelCase ):
_lowerCAmelCase = FeaturesManager.determine_framework(self.test_model )
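# --- Hedged usage sketch (added) of the API under test ---
# determine_framework falls back to environment availability when no local
# checkpoint is given; the tiny model id below is an assumption for the demo.
from transformers.onnx import FeaturesManager
framework = FeaturesManager.determine_framework("hf-internal-testing/tiny-random-bert")
model_class = FeaturesManager.get_model_class_for_feature("sequence-classification", framework)
print(framework, model_class.__name__)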
| 664 | 0 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007
def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))
def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return sum((va - vb) ** 2 for va, vb in zip(vector_1, vector_2)) ** (1 / 2)
if __name__ == "__main__":
    def benchmark() -> None:
        from timeit import timeit
        print('''Without Numpy''')
        print(
            timeit(
                '''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''', number=10_000, globals=globals(),
            )
        )
        print('''With Numpy''')
        print(
            timeit(
                '''euclidean_distance([1, 2, 3], [4, 5, 6])''', number=10_000, globals=globals(),
            )
        )
    benchmark()
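    # --- Hedged sanity check (added; uses the names restored above) ---
    assert euclidean_distance([0, 0], [3, 4]) == 5.0
    assert euclidean_distance_no_np([0, 0], [3, 4]) == 5.0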
| 720 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase ( snake_case_ ,unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = DiTPipeline
SCREAMING_SNAKE_CASE__ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
SCREAMING_SNAKE_CASE__ = PipelineTesterMixin.required_optional_params - {
'''latents''',
'''num_images_per_prompt''',
'''callback''',
'''callback_steps''',
}
SCREAMING_SNAKE_CASE__ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
SCREAMING_SNAKE_CASE__ = False
def __lowerCAmelCase ( self ):
torch.manual_seed(0 )
_lowerCAmelCase = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_lowerCAmelCase , activation_fn='''gelu-approximate''' , num_embeds_ada_norm=1_000 , norm_type='''ada_norm_zero''' , norm_elementwise_affine=_lowerCAmelCase , )
_lowerCAmelCase = AutoencoderKL()
_lowerCAmelCase = DDIMScheduler()
_lowerCAmelCase = {'''transformer''': transformer.eval(), '''vae''': vae.eval(), '''scheduler''': scheduler}
return components
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase=0 ):
if str(_lowerCAmelCase ).startswith('''mps''' ):
_lowerCAmelCase = torch.manual_seed(_lowerCAmelCase )
else:
_lowerCAmelCase = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
_lowerCAmelCase = {
'''class_labels''': [1],
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def __lowerCAmelCase ( self ):
_lowerCAmelCase = '''cpu'''
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = self.pipeline_class(**_lowerCAmelCase )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowerCAmelCase = self.get_dummy_inputs(_lowerCAmelCase )
_lowerCAmelCase = pipe(**_lowerCAmelCase ).images
_lowerCAmelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_lowerCAmelCase = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] )
_lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_lowerCAmelCase , 1E-3 )
def __lowerCAmelCase ( self ):
self._test_inference_batch_single_identical(relax_max_difference=_lowerCAmelCase , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __lowerCAmelCase ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class UpperCAmelCase ( unittest.TestCase ):
def __lowerCAmelCase ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ):
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-256''' )
pipe.to('''cuda''' )
_lowerCAmelCase = ['''vase''', '''umbrella''', '''white shark''', '''white wolf''']
_lowerCAmelCase = pipe.get_label_ids(_lowerCAmelCase )
_lowerCAmelCase = pipe(_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=40 , output_type='''np''' ).images
for word, image in zip(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = load_numpy(
F'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
assert np.abs((expected_image - image).max() ) < 1E-2
def __lowerCAmelCase ( self ):
_lowerCAmelCase = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-512''' )
_lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('''cuda''' )
_lowerCAmelCase = ['''vase''', '''umbrella''']
_lowerCAmelCase = pipe.get_label_ids(_lowerCAmelCase )
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = pipe(_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=25 , output_type='''np''' ).images
for word, image in zip(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
F'''/dit/{word}_512.npy''' )
assert np.abs((expected_image - image).max() ) < 1E-1
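# --- Hedged inference sketch (added; mirrors the slow test above) ---
# Needs a CUDA GPU and downloads the DiT-XL-2-256 weights on first run.
import torch
from diffusers import DiTPipeline, DPMSolverMultistepScheduler
pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.to("cuda")
class_ids = pipe.get_label_ids(["white shark"])
image = pipe(class_labels=class_ids, num_inference_steps=25).images[0]
image.save("white_shark.png")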
| 664 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ,unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = KandinskyVaaControlnetPipeline
SCREAMING_SNAKE_CASE__ = ['image_embeds', 'negative_image_embeds', 'hint']
SCREAMING_SNAKE_CASE__ = ['image_embeds', 'negative_image_embeds', 'hint']
SCREAMING_SNAKE_CASE__ = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
SCREAMING_SNAKE_CASE__ = False
@property
def __lowerCAmelCase ( self ):
return 32
@property
def __lowerCAmelCase ( self ):
return 32
@property
def __lowerCAmelCase ( self ):
return self.time_input_dim
@property
def __lowerCAmelCase ( self ):
return self.time_input_dim * 4
@property
def __lowerCAmelCase ( self ):
return 100
@property
def __lowerCAmelCase ( self ):
torch.manual_seed(0 )
_lowerCAmelCase = {
'''in_channels''': 8,
            # out_channels is double in_channels because the model predicts both mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
_lowerCAmelCase = UNetaDConditionModel(**_a )
return model
@property
def __lowerCAmelCase ( self ):
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def __lowerCAmelCase ( self ):
torch.manual_seed(0 )
_lowerCAmelCase = VQModel(**self.dummy_movq_kwargs )
return model
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.dummy_unet
_lowerCAmelCase = self.dummy_movq
_lowerCAmelCase = DDIMScheduler(
num_train_timesteps=1_000 , beta_schedule='''linear''' , beta_start=0.00_085 , beta_end=0.012 , clip_sample=_a , set_alpha_to_one=_a , steps_offset=1 , prediction_type='''epsilon''' , thresholding=_a , )
_lowerCAmelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase=0 ):
_lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_a ) ).to(_a )
_lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_a )
# create hint
_lowerCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(_a ) ).to(_a )
if str(_a ).startswith('''mps''' ):
_lowerCAmelCase = torch.manual_seed(_a )
else:
_lowerCAmelCase = torch.Generator(device=_a ).manual_seed(_a )
_lowerCAmelCase = {
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
    def test_kandinsky_controlnet(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595]
        )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_controlnet(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy"
        )
        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to("cuda")

        pipeline = KandinskyV22ControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to("cuda")
        pipeline.set_progress_bar_config(disable=None)

        prompt = "A robot, 4k photo"
        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="",
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image, expected_image)
| 721 |
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
UpperCAmelCase_ = {"UserAgent": UserAgent().random}
def extract_user_profile(script) -> dict:
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


class InstagramUser:
    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self):
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self ):
return F'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self ):
return F'''{self.fullname} ({self.username}) is {self.biography}'''
    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str = "github" )->None:
import os
if os.environ.get('''CI''' ):
return # test failing on GitHub Actions
_lowerCAmelCase = InstagramUser(_SCREAMING_SNAKE_CASE )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , _SCREAMING_SNAKE_CASE )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120_000
    assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('''https://instagram.''' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ = InstagramUser("github")
print(instagram_user)
print(F"""{instagram_user.number_of_posts = }""")
print(F"""{instagram_user.number_of_followers = }""")
print(F"""{instagram_user.number_of_followings = }""")
print(F"""{instagram_user.email = }""")
print(F"""{instagram_user.website = }""")
print(F"""{instagram_user.profile_picture_url = }""")
print(F"""{instagram_user.is_verified = }""")
print(F"""{instagram_user.is_private = }""")
| 664 | 0 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
UpperCAmelCase_ = {"""configuration_gpt_neox""": ["""GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXConfig"""]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["""GPTNeoXTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
"""GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXForCausalLM""",
"""GPTNeoXForQuestionAnswering""",
"""GPTNeoXForSequenceClassification""",
"""GPTNeoXForTokenClassification""",
"""GPTNeoXLayer""",
"""GPTNeoXModel""",
"""GPTNeoXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 700 |
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse the denominations, largest first (assumes ascending input)
    for denomination in reversed(denominations):
        # Take as many coins of this denomination as possible
        while total_value >= denomination:
            total_value -= denomination
            answer.append(denomination)  # Append the coin to the "answer" list
    return answer
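# Added sanity check (not part of the original script). Note the greedy
# strategy is only guaranteed optimal for canonical coin systems such as the
# Indian denominations used here.
def _demo_minimum_change() -> None:
    denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
    assert find_minimum_change(denominations, "987") == [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]

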
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) <= 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for coin in answer:
            print(coin, end=" ")
| 664 | 0 |
from math import isqrt
def is_prime(number: int) -> bool:
    # trial division; candidates passed in below start at 7, so number >= 2 holds
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
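# Illustrative check (added, hedged): the candidates visited by solution()
# are exactly the consecutive cube differences (k + 1)**3 - k**3 = 3k^2 + 3k + 1.
def _demo_candidates(limit: int = 5) -> None:
    candidate, cube_index = 7, 1
    for k in range(1, limit + 1):
        assert candidate == (k + 1) ** 3 - k**3
        cube_index += 1
        candidate += 6 * cube_index

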
if __name__ == "__main__":
print(F"""{solution() = }""")
| 701 |
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--albert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained ALBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
UpperCAmelCase_ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
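# Example invocation (added; the paths below are hypothetical):
#
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./albert_base/model.ckpt-best \
#       --albert_config_file ./albert_base/albert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin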
| 664 | 0 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
| 702 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
raise Exception("requires fairseq >= 1.0.0a")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = "Hello world! cécé herlolip"
def convert_xlm_roberta_xl_checkpoint_to_pytorch(
    roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1e-5 , )
if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our RoBERTa config:", config)
    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()
# Now let's copy all the weights.
# Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.
    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias
        # self attention
        self_attn: BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias
# self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias
        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias
        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias
        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
# end of layer
    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
print(our_output.shape , their_output.shape )
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
if not success:
raise Exception('''Something went wRoNg''' )
    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
UpperCAmelCase_ = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 664 | 0 |
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
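# Minimal added sketch: because both inputs are L2-normalised above, the
# returned matrix holds cosine similarities in [-1, 1].
def _demo_cosine_distance() -> None:
    image_embeds = torch.randn(2, 8)
    text_embeds = torch.randn(3, 8)
    sims = cosine_distance(image_embeds, text_embeds)
    assert sims.shape == (2, 3)
    assert bool(torch.all(sims.abs() <= 1.0 + 1e-6))

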
class StableDiffusionSafetyChecker( PreTrainedModel ):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]
    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)
@torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]
        return images, has_nsfw_concepts
@torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
| 703 |
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput( BaseOutput ):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
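# Added numeric sketch: for the default "cosine" transform the betas start
# tiny and are capped near max_beta at the end of the schedule.
def _demo_betas_for_alpha_bar() -> None:
    betas = betas_for_alpha_bar(10)
    assert betas.shape == (10,)
    assert betas[0] < betas[-1] <= 0.999 + 1e-6

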
class DDIMInverseScheduler( SchedulerMixin , ConfigMixin ):
    order = 1
@register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1_000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas=None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample
    def set_timesteps(self, num_inference_steps: int, device=None):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )
        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
self.timesteps += self.config.steps_offset
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise=None,
        return_dict: bool = True,
    ):
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )
        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def __len__( self ):
return self.config.num_train_timesteps
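# Hedged usage sketch (added; `unet` and `sample` are assumed to exist):
#
#     scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(50)
#     for t in scheduler.timesteps:
#         noise_pred = unet(sample, t).sample
#         sample = scheduler.step(noise_pred, t, sample).prev_sample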
| 664 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class DanceDiffusionPipeline( DiffusionPipeline ):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)
@torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator=None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate
        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()

        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
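# Hedged usage sketch (added; the checkpoint id is an assumption, swap in
# your own model):
#
#     pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#     audios = pipe(audio_length_in_s=4.0, num_inference_steps=100).audios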
| 704 |
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
"CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
"CpmAntForCausalLM",
"CpmAntModel",
"CpmAntPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 664 | 0 |
def apply_table(inp, table):
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    return data[1:] + data[0]


def xor(a, b):
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
if __name__ == "__main__":
UpperCAmelCase_ = input("Enter 10 bit key: ")
UpperCAmelCase_ = input("Enter 8 bit message: ")
UpperCAmelCase_ = [6, 3, 7, 4, 8, 5, 1_0, 9]
UpperCAmelCase_ = [3, 5, 2, 7, 4, 1_0, 1, 9, 8, 6]
UpperCAmelCase_ = [2, 4, 3, 1]
UpperCAmelCase_ = [2, 6, 3, 1, 4, 8, 5, 7]
UpperCAmelCase_ = [4, 1, 3, 5, 7, 2, 8, 6]
UpperCAmelCase_ = [4, 1, 2, 3, 2, 3, 4, 1]
UpperCAmelCase_ = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
UpperCAmelCase_ = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
UpperCAmelCase_ = apply_table(key, paa_table)
UpperCAmelCase_ = temp[:5]
UpperCAmelCase_ = temp[5:]
UpperCAmelCase_ = left_shift(left)
UpperCAmelCase_ = left_shift(right)
UpperCAmelCase_ = apply_table(left + right, pa_table)
UpperCAmelCase_ = left_shift(left)
UpperCAmelCase_ = left_shift(right)
UpperCAmelCase_ = left_shift(left)
UpperCAmelCase_ = left_shift(right)
UpperCAmelCase_ = apply_table(left + right, pa_table)
# encryption
UpperCAmelCase_ = apply_table(message, IP)
UpperCAmelCase_ = function(expansion, sa, sa, keya, temp)
UpperCAmelCase_ = temp[4:] + temp[:4]
UpperCAmelCase_ = function(expansion, sa, sa, keya, temp)
UpperCAmelCase_ = apply_table(temp, IP_inv)
print("Cipher text is:", CT)
# decryption
UpperCAmelCase_ = apply_table(CT, IP)
UpperCAmelCase_ = function(expansion, sa, sa, keya, temp)
UpperCAmelCase_ = temp[4:] + temp[:4]
UpperCAmelCase_ = function(expansion, sa, sa, keya, temp)
UpperCAmelCase_ = apply_table(temp, IP_inv)
print("Plain text after decypting is:", PT)
| 705 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor( ProcessorMixin ):
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
@property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
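# Hedged usage sketch (added; the checkpoint id and `waveform` array are
# assumptions):
#
#     processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#     inputs = processor(text=["a dog barking"], audios=waveform,
#                        sampling_rate=48_000, return_tensors="pt")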
| 664 | 0 |
UpperCAmelCase_ = {
'''Pillow''': '''Pillow''',
'''accelerate''': '''accelerate>=0.11.0''',
'''compel''': '''compel==0.1.8''',
'''black''': '''black~=23.1''',
'''datasets''': '''datasets''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.13.2''',
'''requests-mock''': '''requests-mock==1.10.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''invisible-watermark''': '''invisible-watermark''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2''',
'''jaxlib''': '''jaxlib>=0.1.65''',
'''Jinja2''': '''Jinja2''',
'''k-diffusion''': '''k-diffusion>=0.0.12''',
'''torchsde''': '''torchsde''',
'''note_seq''': '''note_seq''',
'''librosa''': '''librosa''',
'''numpy''': '''numpy''',
'''omegaconf''': '''omegaconf''',
'''parameterized''': '''parameterized''',
'''protobuf''': '''protobuf>=3.20.3,<4''',
'''pytest''': '''pytest''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''ruff''': '''ruff>=0.0.241''',
'''safetensors''': '''safetensors''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''scipy''': '''scipy''',
'''onnx''': '''onnx''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''tensorboard''': '''tensorboard''',
'''torch''': '''torch>=1.4''',
'''torchvision''': '''torchvision''',
'''transformers''': '''transformers>=4.25.1''',
'''urllib3''': '''urllib3<=2.0.0''',
}
| 706 |
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]

    for i in my_list:
        buckets[int(i - min_value)].append(i)

    return [v for bucket in buckets for v in sorted(bucket)]
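# Added sketch: cross-check the bucket sort against the built-in sort.
def _demo_bucket_sort() -> None:
    import random

    data = [random.randint(-50, 50) for _ in range(100)]
    assert bucket_sort(data) == sorted(data)

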
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
| 664 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class CTRLConfig( PretrainedConfig ):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=246_534,
        n_positions=256,
        n_embd=1_280,
        dff=8_192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
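# Added sketch: `attribute_map` lets standard config attribute names resolve
# to the CTRL-specific ones declared above.
def _demo_ctrl_config() -> None:
    config = CTRLConfig(n_layer=2)
    assert config.num_hidden_layers == 2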
| 707 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
UpperCAmelCase_ = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser
def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")
def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
| 664 | 0 |
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool( PipelineTool ):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]
    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)
    def encode(self, document: "Image.Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences
    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
| 708 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10_000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1: str, code2: str) -> float:
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None
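# Added sketch: identical token sets give 1.0, disjoint ones give 0.0.
def _demo_jaccard() -> None:
    assert jaccard_similarity("a = 1", "a = 1") == 1.0
    assert jaccard_similarity("a = 1", "b = 2") == 0.0

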
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Any )->List[Any]:
_lowerCAmelCase = []
for elementa in cluster:
_lowerCAmelCase = _shared_dataset[elementa['''base_index''']]['''content''']
for elementa in extremes:
_lowerCAmelCase = _shared_dataset[elementa['''base_index''']]['''content''']
if jaccard_similarity(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
_lowerCAmelCase = 1
extremes.append(_SCREAMING_SNAKE_CASE )
return extremes
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : str )->Tuple:
global _shared_dataset
_lowerCAmelCase = dataset
_lowerCAmelCase = []
_lowerCAmelCase = partial(_find_cluster_extremes_shared , jaccard_threshold=_SCREAMING_SNAKE_CASE )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) , total=len(_SCREAMING_SNAKE_CASE ) , ):
extremes_list.append(_SCREAMING_SNAKE_CASE )
return extremes_list
def deduplicate_dataset(dataset: Type[Dataset] , jaccard_threshold: float = 0.85 )->Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset , jaccard_threshold )
    duplicate_indices = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters , dataset , jaccard_threshold )
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element['''base_index''']] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys() )
    ds_filter = dataset.filter(lambda x , idx: idx not in remove_indices , with_indices=True )
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element['''is_extreme'''] = element['''base_index'''] in extreme_dict
            if element["is_extreme"]:
                element['''copies'''] = extreme_dict[element['''base_index''']]['''copies''']
    print(f'''Original dataset size: {len(dataset )}''' )
    print(f'''Number of duplicate clusters: {len(duplicate_clusters )}''' )
    print(f'''Files in duplicate cluster: {len(duplicate_indices )}''' )
    print(f'''Unique files in duplicate cluster: {len(extreme_dict )}''' )
    print(f'''Filtered dataset size: {len(ds_filter )}''' )
    return ds_filter, duplicate_clusters
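# Minimal usage sketch (assumes a `datasets.Dataset` with "content", "repo_name"
# and "path" columns; the dataset name below is an illustrative assumption):
# from datasets import load_dataset
# ds = load_dataset("codeparrot/codeparrot-clean-valid", split="train")
# ds_dedup, duplicate_clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)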
| 664 | 0 |
import random
from typing import Any
def fisher_yates_shuffle(data: list )->list[Any]:
    for _ in range(len(data ) ):
        a = random.randint(0 , len(data ) - 1 )
        b = random.randint(0 , len(data ) - 1 )
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ['python', 'says', 'hello', '!']
print("Fisher-Yates Shuffle:")
print("List", integers, strings)
print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 709 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset ):
    def __init__(self , dataset , process , params ):
        self.dataset = dataset
        self.process = process
        self.params = params
def __len__( self ):
return len(self.dataset )
    def __getitem__(self , i ):
        item = self.dataset[i]
        processed = self.process(item , **self.params )
        return processed
class PipelineIterator(IterableDataset ):
    def __init__(self , loader , infer , params , loader_batch_size=None ):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size
        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None
def __len__( self ):
return len(self.loader )
def __iter__( self ):
        self.iterator = iter(self.loader )
return self
    def loader_batch_item(self ):
        if isinstance(self._loader_batch_data , torch.Tensor ):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element , ModelOutput ):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0] , torch.Tensor ):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
                    elif isinstance(element[0] , np.ndarray ):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element , tuple ):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0] , torch.Tensor ):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
                    elif isinstance(element[0] , np.ndarray ):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0 )
                elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index] , 0 )
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched )
        self._loader_batch_index += 1
        return result
    def __next__(self ):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()
        # We're out of items within a batch
        item = next(self.iterator )
        processed = self.infer(item , **self.params )
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed , torch.Tensor ):
                first_tensor = processed
            else:
                key = list(processed.keys() )[0]
                first_tensor = processed[key]
            if isinstance(first_tensor , list ):
                observed_batch_size = len(first_tensor )
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class PipelineChunkIterator(PipelineIterator ):
    def __init__(self , loader , infer , params , loader_batch_size=None ):
        super().__init__(loader , infer , params )
def __iter__( self ):
        self.iterator = iter(self.loader )
        self.subiterator = None
return self
    def __next__(self ):
        if self.subiterator is None:
            self.subiterator = self.infer(next(self.iterator ) , **self.params )
        try:
            # Try to return next item
            processed = next(self.subiterator )
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator ) , **self.params )
            processed = next(self.subiterator )
        return processed
class PipelinePackIterator(PipelineIterator ):
def __iter__( self ):
        self.iterator = iter(self.loader )
return self
    def __next__(self ):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # hits an `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop('''is_last''' )
                accumulator.append(item )
                if is_last:
                    return accumulator
        while not is_last:
            processed = self.infer(next(self.iterator ) , **self.params )
            if self.loader_batch_size is not None:
                if isinstance(processed , torch.Tensor ):
                    first_tensor = processed
                else:
                    key = list(processed.keys() )[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor , list ):
                    observed_batch_size = len(first_tensor )
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop('''is_last''' )
                    accumulator.append(item )
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop('''is_last''' )
                accumulator.append(item )
        return accumulator
class KeyDataset(Dataset ):
    def __init__(self , dataset , key ):
        self.dataset = dataset
        self.key = key
    def __len__(self ):
        return len(self.dataset )
    def __getitem__(self , i ):
        return self.dataset[i][self.key]
class KeyPairDataset(Dataset ):
    def __init__(self , dataset , key1 , key2 ):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2
    def __len__(self ):
        return len(self.dataset )
    def __getitem__(self , i ):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
| 664 | 0 |
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray )->np.ndarray:
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b
def gray_to_binary(gray: np.ndarray )->np.ndarray:
    return (gray > 1_2_7) & (gray <= 2_5_5)
def dilation(image: np.ndarray , kernel: np.ndarray )->np.ndarray:
    output = np.zeros_like(image )
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1] ):
        for y in range(image.shape[0] ):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0 )
    return output
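# Quick sanity check (a sketch): a single foreground pixel dilated with a
# cross-shaped structuring element grows into a cross.
# >>> img = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]])
# >>> kern = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
# >>> dilation(img, kern)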
if __name__ == "__main__":
# read original image
    lena_path = Path(__file__).resolve().parent / 'image_data' / 'lena.jpg'
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
| 710 |
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self , input_array , output_array ):
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # Random initial weights are assigned.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1] , 4 )
        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(
            4 , 3 )
        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3 , 1 )
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape )
    def feedforward(self ):
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
        return self.layer_between_second_hidden_layer_and_output
    def back_propagation(self ):
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output ) , )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T , numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
    def train(self , output , iterations , give_loss ):
        for iteration in range(1 , iterations + 1 ):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward() ) )
                print(F'''Iteration {iteration} Loss: {loss}''' )
    def predict(self , input_arr ):
        # Input values for which the predictions are to be made.
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def sigmoid(value: numpy.ndarray )->numpy.ndarray:
    return 1 / (1 + numpy.exp(-value ))
def sigmoid_derivative(value: numpy.ndarray )->numpy.ndarray:
    return (value) * (1 - (value))
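# Note: for s(x) = 1 / (1 + exp(-x)) the derivative is s'(x) = s(x) * (1 - s(x)).
# `sigmoid_derivative` therefore expects `value` to already be a sigmoid *output*,
# which is why it computes value * (1 - value) instead of re-evaluating sigmoid.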
def example()->int:
    test_input = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
        ) , dtype=numpy.float64 , )
# True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.float64 )
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(
        input_array=test_input , output_array=output )
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output , iterations=1_0 , give_loss=False )
    return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.float64 ) )
if __name__ == "__main__":
example()
| 664 | 0 |
def bubble_sort(list_data: list , length: int = 0 )->list:
    """Recursive bubble sort.

    >>> bubble_sort([0, 5, 2, 3, 2])
    [0, 2, 2, 3, 5]
    """
    length = length or len(list_data )
    swapped = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 711 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase_ = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["LayoutXLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 664 | 0 |
from math import pi, sqrt, tan
def surface_area_cube(side_length: float )->float:
if side_length < 0:
raise ValueError('''surface_area_cube() only accepts non-negative values''' )
return 6 * side_length**2
def surface_area_cuboid(length: float , breadth: float , height: float )->float:
if length < 0 or breadth < 0 or height < 0:
raise ValueError('''surface_area_cuboid() only accepts non-negative values''' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def surface_area_sphere(radius: float )->float:
if radius < 0:
raise ValueError('''surface_area_sphere() only accepts non-negative values''' )
return 4 * pi * radius**2
def surface_area_hemisphere(radius: float )->float:
if radius < 0:
raise ValueError('''surface_area_hemisphere() only accepts non-negative values''' )
return 3 * pi * radius**2
def surface_area_cone(radius: float , height: float )->float:
if radius < 0 or height < 0:
raise ValueError('''surface_area_cone() only accepts non-negative values''' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def surface_area_conical_frustum(radius_1: float , radius_2: float , height: float )->float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            '''surface_area_conical_frustum() only accepts non-negative values''' )
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)
def surface_area_cylinder(radius: float , height: float )->float:
if radius < 0 or height < 0:
raise ValueError('''surface_area_cylinder() only accepts non-negative values''' )
return 2 * pi * radius * (height + radius)
def surface_area_torus(torus_radius: float , tube_radius: float )->float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError('''surface_area_torus() only accepts non-negative values''' )
    if torus_radius < tube_radius:
        raise ValueError(
            '''surface_area_torus() does not support spindle or self intersecting tori''' )
    return 4 * pow(pi , 2 ) * torus_radius * tube_radius
def area_rectangle(length: float , width: float )->float:
if length < 0 or width < 0:
raise ValueError('''area_rectangle() only accepts non-negative values''' )
return length * width
def area_square(side_length: float )->float:
if side_length < 0:
raise ValueError('''area_square() only accepts non-negative values''' )
return side_length**2
def area_triangle(base: float , height: float )->float:
if base < 0 or height < 0:
raise ValueError('''area_triangle() only accepts non-negative values''' )
return (base * height) / 2
def area_triangle_three_sides(side_1: float , side_2: float , side_3: float )->float:
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError('''area_triangle_three_sides() only accepts non-negative values''' )
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        raise ValueError('''Given three sides do not form a triangle''' )
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3) )
    return area
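# Worked example of Heron's formula: for sides 5, 12, 13 the semi-perimeter is
# (5 + 12 + 13) / 2 = 15, so the area is sqrt(15 * 10 * 3 * 2) = 30.0.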
def area_parallelogram(base: float , height: float )->float:
if base < 0 or height < 0:
raise ValueError('''area_parallelogram() only accepts non-negative values''' )
return base * height
def area_trapezium(base_1: float , base_2: float , height: float )->float:
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError('''area_trapezium() only accepts non-negative values''' )
    return 1 / 2 * (base_1 + base_2) * height
def area_circle(radius: float )->float:
if radius < 0:
raise ValueError('''area_circle() only accepts non-negative values''' )
return pi * radius**2
def area_ellipse(radius_x: float , radius_y: float )->float:
if radius_x < 0 or radius_y < 0:
raise ValueError('''area_ellipse() only accepts non-negative values''' )
return pi * radius_x * radius_y
def area_rhombus(diagonal_1: float , diagonal_2: float )->float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError('''area_rhombus() only accepts non-negative values''' )
    return 1 / 2 * diagonal_1 * diagonal_2
def area_reg_polygon(sides: int , length: float )->float:
    if not isinstance(sides , int ) or sides < 3:
        raise ValueError(
            '''area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides''' )
    elif length < 0:
        raise ValueError(
            '''area_reg_polygon() only accepts non-negative values as \
length of a side''' )
    return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(F"""Rectangle: {area_rectangle(1_0, 2_0) = }""")
print(F"""Square: {area_square(1_0) = }""")
print(F"""Triangle: {area_triangle(1_0, 1_0) = }""")
print(F"""Triangle: {area_triangle_three_sides(5, 1_2, 1_3) = }""")
print(F"""Parallelogram: {area_parallelogram(1_0, 2_0) = }""")
print(F"""Rhombus: {area_rhombus(1_0, 2_0) = }""")
print(F"""Trapezium: {area_trapezium(1_0, 2_0, 3_0) = }""")
print(F"""Circle: {area_circle(2_0) = }""")
print(F"""Ellipse: {area_ellipse(1_0, 2_0) = }""")
print("\nSurface Areas of various geometric shapes: \n")
print(F"""Cube: {surface_area_cube(2_0) = }""")
print(F"""Cuboid: {surface_area_cuboid(1_0, 2_0, 3_0) = }""")
print(F"""Sphere: {surface_area_sphere(2_0) = }""")
print(F"""Hemisphere: {surface_area_hemisphere(2_0) = }""")
print(F"""Cone: {surface_area_cone(1_0, 2_0) = }""")
print(F"""Conical Frustum: {surface_area_conical_frustum(1_0, 2_0, 3_0) = }""")
print(F"""Cylinder: {surface_area_cylinder(1_0, 2_0) = }""")
print(F"""Torus: {surface_area_torus(2_0, 1_0) = }""")
print(F"""Equilateral Triangle: {area_reg_polygon(3, 1_0) = }""")
print(F"""Square: {area_reg_polygon(4, 1_0) = }""")
print(F"""Reqular Pentagon: {area_reg_polygon(5, 1_0) = }""")
| 712 |
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects )->List[Any]:
    if not isinstance(objects , list ):
        objects = list(objects )
    for i in range(len(objects ) ):
        objects[i] = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
def should_reduce_batch_size(exception: Exception )->bool:
    _statements = [
        '''CUDA out of memory.''', # CUDA OOM
        '''cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.''', # CUDNN SNAFU
        '''DefaultCPUAllocator: can\'t allocate memory''', # CPU OOM
    ]
    if isinstance(exception , RuntimeError ) and len(exception.args ) == 1:
        return any(err in exception.args[0] for err in _statements )
return False
def find_executable_batch_size(function: callable = None , starting_batch_size: int = 1_2_8 ):
    if function is None:
        return functools.partial(find_executable_batch_size , starting_batch_size=starting_batch_size )
    batch_size = starting_batch_size
    def decorator(*args , **kwargs ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
        params = list(inspect.signature(function ).parameters.keys() )
        # Guard against user error
        if len(params ) < (len(args ) + 1):
            arg_str = ''', '''.join([f'''{arg}={value}''' for arg, value in zip(params[1:] , args[1:] )] )
raise TypeError(
f'''Batch size was passed into `{function.__name__}` as the first argument when called.'''
f'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''' )
while True:
if batch_size == 0:
raise RuntimeError('''No executable batch size found, reached zero.''' )
try:
                return function(batch_size , *args , **kwargs )
except Exception as e:
if should_reduce_batch_size(_SCREAMING_SNAKE_CASE ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator
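# Typical usage sketch (the training function, its arguments and body are
# illustrative assumptions):
# @find_executable_batch_size(starting_batch_size=1_2_8)
# def training_loop(batch_size, model, dataloader):
#     ...  # re-run with batch_size halved whenever an OOM error is raised
# training_loop(model, dataloader)  # note: batch_size is injected by the decorator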
| 664 | 0 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id , token=None ):
    headers = None
    if token is not None:
        headers = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
    url = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
    result = requests.get(url , headers=headers ).json()
    job_links = {}
    try:
        job_links.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
        pages_to_iterate_over = math.ceil((result['''total_count'''] - 1_0_0) / 1_0_0 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + f'''&page={i + 2}''' , headers=headers ).json()
            job_links.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
        return job_links
    except Exception:
        print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
    return {}
def get_artifacts_links(workflow_run_id , token=None ):
    headers = None
    if token is not None:
        headers = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
    url = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100'''
    result = requests.get(url , headers=headers ).json()
    artifacts = {}
    try:
        artifacts.update({artifact['''name''']: artifact['''archive_download_url'''] for artifact in result['''artifacts''']} )
        pages_to_iterate_over = math.ceil((result['''total_count'''] - 1_0_0) / 1_0_0 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + f'''&page={i + 2}''' , headers=headers ).json()
            artifacts.update({artifact['''name''']: artifact['''archive_download_url'''] for artifact in result['''artifacts''']} )
        return artifacts
    except Exception:
        print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
    return {}
def download_artifact(artifact_name , artifact_url , output_dir , token ):
    headers = None
    if token is not None:
        headers = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
    result = requests.get(artifact_url , headers=headers , allow_redirects=False )
    download_url = result.headers["""Location"""]
    response = requests.get(download_url , allow_redirects=True )
    file_path = os.path.join(output_dir , f'''{artifact_name}.zip''' )
    with open(file_path , '''wb''' ) as fp:
        fp.write(response.content )
def get_errors_from_single_artifact(artifact_zip_path , job_links=None ):
    errors = []
    failed_tests = []
    job_name = None
    with zipfile.ZipFile(artifact_zip_path ) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename ):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename ) as f:
                        for line in f:
                            line = line.decode('''UTF-8''' ).strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(''': ''' )]
                                    error = line[line.index(''': ''' ) + len(''': ''' ) :]
                                    errors.append([error_line, error] )
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith('''FAILED ''' ):
                                # `test` is the test method that failed
                                test = line[len('''FAILED ''' ) :]
                                failed_tests.append(test )
                            elif filename == "job_name.txt":
                                job_name = line
    if len(errors ) != len(failed_tests ):
        raise ValueError(
            f'''`errors` and `failed_tests` should have the same number of elements. Got {len(errors )} for `errors` '''
            f'''and {len(failed_tests )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'''
            ''' problem.''' )
    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name , None )
    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors , failed_tests )]
    return result
def get_all_errors(artifact_dir , job_links=None ):
    errors = []
    paths = [os.path.join(artifact_dir , p ) for p in os.listdir(artifact_dir ) if p.endswith('''.zip''' )]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p , job_links=job_links ) )
    return errors
def reduce_by_error(logs , error_filter=None ):
    counter = Counter()
    counter.update([x[1] for x in logs] )
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
    r = dict(sorted(r.items() , key=lambda item: item[1]["count"] , reverse=True ) )
    return r
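# Shape of the mapping returned above (illustrative):
# {"SomeError: ...": {"count": 12, "failed_tests": [(job_link, error_line), ...]}, ...}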
def get_model(test ):
    test = test.split('''::''' )[0]
    if test.startswith('''tests/models/''' ):
        model = test.split('''/''' )[2]
    else:
        model = None
    return model
def reduce_by_model(logs , error_filter=None ):
    logs = [(x[0], x[1], get_model(x[2] )) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}
    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test] )
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values() )
        if n_errors > 0:
            r[test] = {"""count""": n_errors, """errors""": error_counts}
    r = dict(sorted(r.items() , key=lambda item: item[1]["count"] , reverse=True ) )
    return r
def make_github_table(reduced_by_error ):
    header = """| no. | error | status |"""
    sep = """|-:|:-|:-|"""
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["""count"""]
        line = f'''| {count} | {error[:1_0_0]} | |'''
        lines.append(line )
    return "\n".join(lines )
def make_github_table_per_model(reduced_by_model ):
    header = """| model | no. of errors | major error | count |"""
    sep = """|-:|-:|-:|-:|"""
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["""count"""]
        error, _count = list(reduced_by_model[model]['''errors'''].items() )[0]
        line = f'''| {model} | {count} | {error[:6_0]} | {_count} |'''
        lines.append(line )
    return "\n".join(lines )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
UpperCAmelCase_ = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
    errors = get_all_errors(args.output_dir, job_links=job_links)
    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])
    # print the top 30 most common test errors
    most_common = counter.most_common(3_0)
    for item in most_common:
        print(item)
    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)
    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)
    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)
    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
| 713 |
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(self , parent , batch_size=2 , seq_length=8 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=16 , num_hidden_layers=5 , num_attention_heads=2 , intermediate_size=36 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self ):
        return MraConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def get_pipeline_config(self ):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder(self ):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MraModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        config.add_cross_attention = True
        model = MraModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MraForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_question_answering(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MraForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_sequence_classification(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_multiple_choice(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
    def setUp(self ):
        self.model_tester = MraModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MraConfig , hidden_size=37 )
    def test_config(self ):
        self.config_tester.run_common_tests()
    def test_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_question_answering(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained(self ):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    @unittest.skip(reason='''MRA does not output attentions''' )
    def test_attention_outputs(self ):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase ):
    @slow
    def test_inference_no_head(self ):
        model = MraModel.from_pretrained('''uw-madison/mra-base-512-4''' )
        input_ids = torch.arange(256 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 256, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.0_140, 0.0_830, -0.0_381], [0.1_546, 0.1_402, 0.0_220], [0.1_162, 0.0_851, 0.0_165]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
    @slow
    def test_inference_masked_lm(self ):
        model = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''' )
        input_ids = torch.arange(256 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        vocab_size = 50_265
        expected_shape = torch.Size((1, 256, vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[9.2_595, -3.6_038, 11.8_819], [9.3_869, -3.2_693, 11.0_956], [11.8_524, -3.4_938, 13.1_210]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
    @slow
    def test_inference_masked_lm_long_input(self ):
        model = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''' )
        input_ids = torch.arange(4_096 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        vocab_size = 50_265
        expected_shape = torch.Size((1, 4_096, vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[5.4_789, -2.3_564, 7.5_064], [7.9_067, -1.3_369, 9.9_668], [9.0_712, -1.8_106, 7.0_380]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
| 664 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class LxmertConfig(PretrainedConfig ):
    model_type = '''lxmert'''
    attribute_map = {}
    def __init__(self , vocab_size=30_522 , hidden_size=768 , num_attention_heads=12 , num_qa_labels=9_500 , num_object_labels=1_600 , num_attr_labels=400 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , l_layers=9 , x_layers=5 , r_layers=5 , visual_feat_dim=2_048 , visual_pos_dim=4 , visual_loss_normalizer=6.67 , task_matched=True , task_mask_lm=True , task_obj_predict=True , task_qa=True , visual_obj_loss=True , visual_attr_loss=True , visual_feat_loss=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers}
        super().__init__(**kwargs )
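# Minimal usage sketch (values are illustrative; defaults match the signature above):
# config = LxmertConfig(l_layers=9, x_layers=5, r_layers=5)
# config.num_hidden_layers  # -> {"vision": 5, "cross_encoder": 5, "language": 9}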
| 714 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 664 | 0 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
UpperCAmelCase_ = logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments(TrainingArguments ):
    label_smoothing: Optional[float] = field(
        default=0.0 , metadata={'''help''': '''The label smoothing epsilon to apply (if not zero).'''} )
    sortish_sampler: bool = field(default=False , metadata={'''help''': '''Whether to use SortishSampler or not.'''} )
    predict_with_generate: bool = field(
        default=False , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
    adafactor: bool = field(default=False , metadata={'''help''': '''whether to use adafactor'''} )
    encoder_layerdrop: Optional[float] = field(
        default=None , metadata={'''help''': '''Encoder layer dropout probability. Goes into model.config.'''} )
    decoder_layerdrop: Optional[float] = field(
        default=None , metadata={'''help''': '''Decoder layer dropout probability. Goes into model.config.'''} )
    dropout: Optional[float] = field(default=None , metadata={'''help''': '''Dropout probability. Goes into model.config.'''} )
    attention_dropout: Optional[float] = field(
        default=None , metadata={'''help''': '''Attention dropout probability. Goes into model.config.'''} )
    lr_scheduler: Optional[str] = field(
        default='''linear''' , metadata={'''help''': f"""Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"""} ,)
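# Minimal usage sketch (flag values are illustrative):
# from transformers import HfArgumentParser
# parser = HfArgumentParser(Seq2SeqTrainingArguments)
# (training_args,) = parser.parse_args_into_dataclasses(
#     ["--output_dir", "out", "--predict_with_generate", "--label_smoothing", "0.1"]
# )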
| 715 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase ):
    def __init__(self , parent , batch_size=7 , num_channels=3 , num_frames=10 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , crop_size=None , ):
        size = size if size is not None else {'''shortest_edge''': 18}
        crop_size = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size
    def prepare_image_processor_dict(self ):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = VivitImageProcessor if is_vision_available() else None
    def setUp(self ):
        self.image_processor_tester = VivitImageProcessingTester(self )
    @property
    def image_processor_dict(self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''image_mean''' ) )
        self.assertTrue(hasattr(image_processing , '''image_std''' ) )
        self.assertTrue(hasattr(image_processing , '''do_normalize''' ) )
        self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing , '''do_center_crop''' ) )
        self.assertTrue(hasattr(image_processing , '''size''' ) )
    def test_image_processor_from_dict_with_kwargs(self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 18} )
        self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
        self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
    def test_call_pil(self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester , equal_resolution=False )
        for video in video_inputs:
            self.assertIsInstance(video , list )
            self.assertIsInstance(video[0] , Image.Image )
        # Test not batched input
        encoded_videos = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_videos.shape , (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        encoded_videos = image_processing(video_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_videos.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
def __lowerCAmelCase ( self ):
# Initialize image_processing
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCAmelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , numpify=_lowerCAmelCase )
for video in video_inputs:
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
self.assertIsInstance(video[0] , np.ndarray )
# Test not batched input
_lowerCAmelCase = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_lowerCAmelCase = image_processing(_lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __lowerCAmelCase ( self ):
# Initialize image_processing
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCAmelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase )
for video in video_inputs:
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
self.assertIsInstance(video[0] , torch.Tensor )
# Test not batched input
_lowerCAmelCase = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_lowerCAmelCase = image_processing(_lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 664 | 0 |
def check_normality(moles: float, volume: float, nfactor: float) -> int:
    # Normality = molarity (moles / volume) scaled by the n-factor.
    return round(float(moles / volume) * nfactor)


def pressure_of_gas_system(moles: float, temperature: float, volume: float) -> int:
    # Ideal gas law, P = nRT / V, with R rounded to 0.0821 L*atm/(mol*K).
    return round(float((moles * 0.0821 * temperature) / volume))


def volume_of_gas_system(moles: float, temperature: float, pressure: float) -> int:
    # Ideal gas law, V = nRT / P.
    return round(float((moles * 0.0821 * temperature) / pressure))


def temperature_of_gas_system(pressure: float, volume: float, moles: float) -> int:
    # Ideal gas law, T = PV / (nR).
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
import doctest
doctest.testmod()
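    # Usage sketch (added; not in the original script): one mole of an ideal gas
    # at roughly STP, using the rounded constant 0.0821 L*atm/(mol*K).
    print(pressure_of_gas_system(1, 273, 22.4))  # -> 1 (atm)
    print(volume_of_gas_system(1, 273, 1))  # -> 22 (L, rounded)
    print(temperature_of_gas_system(1, 22.4, 1))  # -> 273 (K)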
| 716 |
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
UpperCAmelCase_ = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
UpperCAmelCase_ = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
UpperCAmelCase_ = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
def normalize_answer(text):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(text))))
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence: str, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though the Wiki-Auto and TURK datasets
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class UpperCAmelCase ( datasets.Metric ):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=[
'''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
'''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
'''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
'''https://github.com/mjpost/sacreBLEU''',
] , reference_urls=[
'''https://www.aclweb.org/anthology/Q16-1029.pdf''',
'''https://github.com/mjpost/sacreBLEU''',
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
return result
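if __name__ == "__main__":
    # Usage sketch (added; mirrors the example in _KWARGS_DESCRIPTION above).
    # Requires the `datasets`, `sacrebleu` and `sacremoses` packages.
    sources = ["About 95 species are currently accepted ."]
    predictions = ["About 95 you now get in ."]
    references = [["About 95 species are currently known ."]]
    print(
        {
            "sari": compute_sari(sources=sources, predictions=predictions, references=references),
            "sacrebleu": compute_sacrebleu(predictions=predictions, references=references),
            "exact": compute_em(predictions=predictions, references=references),
        }
    )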
| 664 | 0 |
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """
    Interleave the characters of two strings, appending the leftover characters
    of the longer string at the end.
    >>> alternative_string_arrange("ABCD", "XY")
    'AXBYCD'
    """
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    abs_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)
if __name__ == "__main__":
print(alternative_string_arrange("AB", "XYZ"), end=" ")
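    # -> prints "AXBYZ"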
| 717 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase_ = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["DeiTFeatureExtractor"]
UpperCAmelCase_ = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
"DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DeiTForImageClassification",
"DeiTForImageClassificationWithTeacher",
"DeiTForMaskedImageModeling",
"DeiTModel",
"DeiTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
"TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDeiTForImageClassification",
"TFDeiTForImageClassificationWithTeacher",
"TFDeiTForMaskedImageModeling",
"TFDeiTModel",
"TFDeiTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
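# Added note: with the _LazyModule registration above, submodules are imported
# only on first attribute access, e.g. `from transformers.models.deit import
# DeiTModel` loads modeling_deit (and hence torch) at that point, not when the
# package itself is imported.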
| 664 | 0 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMTaModelTester:
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=99 , _lowerCAmelCase=13 , _lowerCAmelCase=7 , _lowerCAmelCase=9 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase=32 , _lowerCAmelCase=5 , _lowerCAmelCase=4 , _lowerCAmelCase=37 , _lowerCAmelCase=8 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.002 , _lowerCAmelCase=1 , _lowerCAmelCase=0 , _lowerCAmelCase=0 , _lowerCAmelCase=None , _lowerCAmelCase=None , ):
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = encoder_seq_length
_lowerCAmelCase = decoder_seq_length
# For common tests
_lowerCAmelCase = self.decoder_seq_length
_lowerCAmelCase = is_training
_lowerCAmelCase = use_attention_mask
_lowerCAmelCase = use_labels
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = d_ff
_lowerCAmelCase = relative_attention_num_buckets
_lowerCAmelCase = dropout_rate
_lowerCAmelCase = initializer_factor
_lowerCAmelCase = eos_token_id
_lowerCAmelCase = pad_token_id
_lowerCAmelCase = decoder_start_token_id
_lowerCAmelCase = None
_lowerCAmelCase = decoder_layers
def __lowerCAmelCase ( self ):
return TaConfig.from_pretrained('''google/umt5-base''' )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , ):
if attention_mask is None:
_lowerCAmelCase = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
_lowerCAmelCase = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
_lowerCAmelCase = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=UpperCAmelCase__ )
if decoder_head_mask is None:
_lowerCAmelCase = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=UpperCAmelCase__ )
if cross_attn_head_mask is None:
_lowerCAmelCase = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=UpperCAmelCase__ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def __lowerCAmelCase ( self ):
_lowerCAmelCase = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
_lowerCAmelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        # we need to clamp the input ids here to avoid having pad tokens in between:
        # as for NllbMoe, the position_ids are prepared such that all pad tokens
        # get pos id = 2 and the rest lie in 2..seq_length, where seq_length is
        # the sequence length minus the number of pad tokens. When using past,
        # there is no way of knowing whether the past input ids contained pad
        # tokens, which would give an incorrect seq_length and, in turn,
        # position_ids that are off by num_pad_tokens for the past input.
_lowerCAmelCase = input_ids.clamp(self.pad_token_id + 1 )
_lowerCAmelCase = decoder_input_ids.clamp(self.pad_token_id + 1 )
_lowerCAmelCase = self.get_config()
_lowerCAmelCase = config.num_attention_heads
_lowerCAmelCase = self.prepare_inputs_dict(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
return config, input_dict
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def __lowerCAmelCase ( self ):
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def __lowerCAmelCase ( self ):
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
_lowerCAmelCase = UMTaModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
_lowerCAmelCase = model(
input_ids=UpperCAmelCase__ , decoder_input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , )
_lowerCAmelCase = model(input_ids=UpperCAmelCase__ , decoder_input_ids=UpperCAmelCase__ )
_lowerCAmelCase = result.last_hidden_state
_lowerCAmelCase = result.past_key_values
_lowerCAmelCase = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(UpperCAmelCase__ ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
_lowerCAmelCase = UMTaModel(config=UpperCAmelCase__ ).get_decoder().to(UpperCAmelCase__ ).eval()
# first forward pass
_lowerCAmelCase = model(UpperCAmelCase__ , use_cache=UpperCAmelCase__ )
_lowerCAmelCase = model(UpperCAmelCase__ )
_lowerCAmelCase = model(UpperCAmelCase__ , use_cache=UpperCAmelCase__ )
self.parent.assertTrue(len(UpperCAmelCase__ ) == len(UpperCAmelCase__ ) )
self.parent.assertTrue(len(UpperCAmelCase__ ) == len(UpperCAmelCase__ ) + 1 )
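        # `use_cache=True` adds exactly one extra entry (`past_key_values`) to the
        # model outputs relative to the cache-free forward pass, hence the `+ 1`.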
_lowerCAmelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_lowerCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
_lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
_lowerCAmelCase = model(UpperCAmelCase__ )['''last_hidden_state''']
_lowerCAmelCase = model(UpperCAmelCase__ , past_key_values=UpperCAmelCase__ )['''last_hidden_state''']
# select random slice
_lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowerCAmelCase = output_from_no_past[:, -1, random_slice_idx].detach()
_lowerCAmelCase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , ):
_lowerCAmelCase = UMTaModel(config=UpperCAmelCase__ ).to(UpperCAmelCase__ ).half().eval()
_lowerCAmelCase = model(**UpperCAmelCase__ )['''last_hidden_state''']
self.parent.assertFalse(torch.isnan(UpperCAmelCase__ ).any().item() )
@require_torch
class UpperCAmelCase(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
SCREAMING_SNAKE_CASE__ = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
SCREAMING_SNAKE_CASE__ = (UMTaForConditionalGeneration,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ = (
{
'conversational': UMTaForConditionalGeneration,
'feature-extraction': UMTaModel,
'summarization': UMTaForConditionalGeneration,
'text2text-generation': UMTaForConditionalGeneration,
'translation': UMTaForConditionalGeneration,
'question-answering': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = True
# The small UMT5 model needs higher percentages for CPU/MP tests
SCREAMING_SNAKE_CASE__ = [0.8, 0.9]
def __lowerCAmelCase ( self ):
_lowerCAmelCase = UMTaModelTester(self )
@unittest.skip('''Test has a segmentation fault on torch 1.8.0''' )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
_lowerCAmelCase = UMTaModel(config_and_inputs[0] ).to(UpperCAmelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
UpperCAmelCase__ , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F'''{tmpdirname}/t5_test.onnx''' , export_params=UpperCAmelCase__ , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , )
@unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*UpperCAmelCase__ )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions''']
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
_lowerCAmelCase = config_and_inputs[0]
_lowerCAmelCase = UMTaForConditionalGeneration(UpperCAmelCase__ ).eval()
model.to(UpperCAmelCase__ )
_lowerCAmelCase = {
'''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=UpperCAmelCase__ ),
'''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCAmelCase__ ),
'''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCAmelCase__ ),
}
for attn_name, (name, mask) in zip(UpperCAmelCase__ , head_masking.items() ):
_lowerCAmelCase = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
_lowerCAmelCase = torch.ones(
config.num_decoder_layers , config.num_heads , device=UpperCAmelCase__ )
_lowerCAmelCase = model.generate(
config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=UpperCAmelCase__ , return_dict_in_generate=UpperCAmelCase__ , **UpperCAmelCase__ , )
# We check the state of decoder_attentions and cross_attentions just from the last step
_lowerCAmelCase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' )
def __lowerCAmelCase ( self ):
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( unittest.TestCase ):
@slow
@unittest.skip(
'''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=UpperCAmelCase__ ).to(UpperCAmelCase__ )
_lowerCAmelCase = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=UpperCAmelCase__ , legacy=UpperCAmelCase__ )
_lowerCAmelCase = [
'''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
'''No se como puedo <extra_id_0>.''',
'''This is the reason why we <extra_id_0> them.''',
'''The <extra_id_0> walks in <extra_id_1>, seats''',
'''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
]
_lowerCAmelCase = tokenizer(UpperCAmelCase__ , return_tensors='''pt''' , padding=UpperCAmelCase__ ).input_ids
# fmt: off
_lowerCAmelCase = torch.tensor(
[
[ 38_530, 210_703, 256_299, 1_410, 256_298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 25_922, 256_299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1_460, 339, 312, 19_014, 10_620, 758, 256_299, 2_355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 256_299, 14_869, 281, 301, 256_298, 275, 119_983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 256_299, 14_869, 281, 2_234, 289, 2_275, 333,61_391, 289, 256_298, 543, 256_297, 168_714, 329, 256_296,274, 1],
] )
# fmt: on
torch.testing.assert_allclose(UpperCAmelCase__ , UpperCAmelCase__ )
_lowerCAmelCase = model.generate(input_ids.to(UpperCAmelCase__ ) )
_lowerCAmelCase = [
'''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
]
_lowerCAmelCase = tokenizer.batch_decode(UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
| 718 |
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            # the root of a DFS tree is an articulation point only if it has
            # more than one tree child
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
UpperCAmelCase_ = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
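# For the sample graph above this prints the articulation points: 2, 3 and 5.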
| 664 | 0 |
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 719 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class UpperCAmelCase(TestCase):
def __lowerCAmelCase ( self ):
_lowerCAmelCase = SMALL_MODEL_IDENTIFIER
_lowerCAmelCase = '''pt'''
_lowerCAmelCase = '''tf'''
def __lowerCAmelCase ( self , _lowerCAmelCase ):
_lowerCAmelCase = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(_lowerCAmelCase )
def __lowerCAmelCase ( self , _lowerCAmelCase ):
_lowerCAmelCase = TFAutoModel.from_pretrained(self.test_model , from_pt=_lowerCAmelCase )
model_tf.save_pretrained(_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = '''mock_framework'''
# Framework provided - return whatever the user provides
_lowerCAmelCase = FeaturesManager.determine_framework(self.test_model , _lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_lowerCAmelCase )
_lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_lowerCAmelCase )
_lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
def __lowerCAmelCase ( self ):
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_lowerCAmelCase )
_lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_lowerCAmelCase )
_lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(_lowerCAmelCase ):
_lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase )
def __lowerCAmelCase ( self ):
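        # TensorFlow not in environment -> use PyTorch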
_lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase )
with patch('''transformers.onnx.features.is_tf_available''' , _lowerCAmelCase ):
_lowerCAmelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowerCAmelCase , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
_lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase )
with patch('''transformers.onnx.features.is_torch_available''' , _lowerCAmelCase ):
_lowerCAmelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowerCAmelCase , self.framework_tf )
# Both in environment -> use PyTorch
_lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase )
_lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase )
with patch('''transformers.onnx.features.is_tf_available''' , _lowerCAmelCase ), patch(
'''transformers.onnx.features.is_torch_available''' , _lowerCAmelCase ):
_lowerCAmelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowerCAmelCase , self.framework_pt )
# Both not in environment -> raise error
_lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase )
_lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase )
with patch('''transformers.onnx.features.is_tf_available''' , _lowerCAmelCase ), patch(
'''transformers.onnx.features.is_torch_available''' , _lowerCAmelCase ):
with self.assertRaises(_lowerCAmelCase ):
_lowerCAmelCase = FeaturesManager.determine_framework(self.test_model )
| 664 | 0 |
def longest_distance(graph):
    indegree = [0] * len(graph)
    queue = []
    # long_dist[v] counts the vertices on the longest path ending at v
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))
# Adjacency list of Graph
UpperCAmelCase_ = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
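# For the sample DAG above this prints 5: the number of vertices on the longest
# path, e.g. 0 -> 2 -> 5 -> 6 -> 7.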
| 720 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase(PipelineTesterMixin, unittest.TestCase):
SCREAMING_SNAKE_CASE__ = DiTPipeline
SCREAMING_SNAKE_CASE__ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
SCREAMING_SNAKE_CASE__ = PipelineTesterMixin.required_optional_params - {
'''latents''',
'''num_images_per_prompt''',
'''callback''',
'''callback_steps''',
}
SCREAMING_SNAKE_CASE__ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
SCREAMING_SNAKE_CASE__ = False
def __lowerCAmelCase ( self ):
torch.manual_seed(0 )
        _lowerCAmelCase = Transformer2DModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_lowerCAmelCase , activation_fn='''gelu-approximate''' , num_embeds_ada_norm=1_000 , norm_type='''ada_norm_zero''' , norm_elementwise_affine=_lowerCAmelCase , )
_lowerCAmelCase = AutoencoderKL()
_lowerCAmelCase = DDIMScheduler()
_lowerCAmelCase = {'''transformer''': transformer.eval(), '''vae''': vae.eval(), '''scheduler''': scheduler}
return components
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase=0 ):
if str(_lowerCAmelCase ).startswith('''mps''' ):
_lowerCAmelCase = torch.manual_seed(_lowerCAmelCase )
else:
_lowerCAmelCase = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
_lowerCAmelCase = {
'''class_labels''': [1],
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def __lowerCAmelCase ( self ):
_lowerCAmelCase = '''cpu'''
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = self.pipeline_class(**_lowerCAmelCase )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowerCAmelCase = self.get_dummy_inputs(_lowerCAmelCase )
_lowerCAmelCase = pipe(**_lowerCAmelCase ).images
_lowerCAmelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_lowerCAmelCase = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] )
_lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_lowerCAmelCase , 1E-3 )
def __lowerCAmelCase ( self ):
self._test_inference_batch_single_identical(relax_max_difference=_lowerCAmelCase , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __lowerCAmelCase ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class UpperCAmelCase ( unittest.TestCase ):
def __lowerCAmelCase ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ):
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-256''' )
pipe.to('''cuda''' )
_lowerCAmelCase = ['''vase''', '''umbrella''', '''white shark''', '''white wolf''']
_lowerCAmelCase = pipe.get_label_ids(_lowerCAmelCase )
_lowerCAmelCase = pipe(_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=40 , output_type='''np''' ).images
for word, image in zip(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = load_numpy(
F'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
assert np.abs((expected_image - image).max() ) < 1E-2
def __lowerCAmelCase ( self ):
_lowerCAmelCase = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-512''' )
_lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('''cuda''' )
_lowerCAmelCase = ['''vase''', '''umbrella''']
_lowerCAmelCase = pipe.get_label_ids(_lowerCAmelCase )
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = pipe(_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=25 , output_type='''np''' ).images
for word, image in zip(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
F'''/dit/{word}_512.npy''' )
assert np.abs((expected_image - image).max() ) < 1E-1
| 664 | 0 |
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase(TokenizerTesterMixin, unittest.TestCase):
SCREAMING_SNAKE_CASE__ = CodeGenTokenizer
SCREAMING_SNAKE_CASE__ = CodeGenTokenizerFast
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = {"""add_prefix_space""": True}
SCREAMING_SNAKE_CASE__ = False
def __lowerCAmelCase ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCAmelCase = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
_lowerCAmelCase = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
_lowerCAmelCase = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_lowerCAmelCase = {"unk_token": "<unk>"}
_lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(snake_case_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(snake_case_ ) )
def __lowerCAmelCase ( self , **_lowerCAmelCase ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def __lowerCAmelCase ( self , **_lowerCAmelCase ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **snake_case_ )
def __lowerCAmelCase ( self , _lowerCAmelCase ):
_lowerCAmelCase = "lower newer"
_lowerCAmelCase = "lower newer"
return input_text, output_text
def __lowerCAmelCase ( self ):
_lowerCAmelCase = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_lowerCAmelCase = "lower newer"
_lowerCAmelCase = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
_lowerCAmelCase = tokenizer.tokenize(snake_case_ , add_prefix_space=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
_lowerCAmelCase = tokens + [tokenizer.unk_token]
_lowerCAmelCase = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case_ ) , snake_case_ )
def __lowerCAmelCase ( self ):
if not self.test_rust_tokenizer:
return
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = self.get_rust_tokenizer(add_prefix_space=snake_case_ )
_lowerCAmelCase = "lower newer"
# Testing tokenization
_lowerCAmelCase = tokenizer.tokenize(snake_case_ , add_prefix_space=snake_case_ )
_lowerCAmelCase = rust_tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
# Testing conversion to ids without special tokens
_lowerCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ , add_prefix_space=snake_case_ )
_lowerCAmelCase = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
# Testing conversion to ids with special tokens
_lowerCAmelCase = self.get_rust_tokenizer(add_prefix_space=snake_case_ )
_lowerCAmelCase = tokenizer.encode(snake_case_ , add_prefix_space=snake_case_ )
_lowerCAmelCase = rust_tokenizer.encode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
# Testing the unknown token
_lowerCAmelCase = tokens + [rust_tokenizer.unk_token]
_lowerCAmelCase = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(snake_case_ ) , snake_case_ )
def __lowerCAmelCase ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
        # It's very difficult to mix/test pretokenization with byte-level BPE and
        # get both CodeGen and Roberta to work at the same time (mostly an issue
        # of adding a space before the string).
pass
def __lowerCAmelCase ( self , _lowerCAmelCase=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(snake_case_ , **snake_case_ )
# Simple input
_lowerCAmelCase = "This is a simple input"
_lowerCAmelCase = ["This is a simple input 1", "This is a simple input 2"]
_lowerCAmelCase = ("This is a simple input", "This is a pair")
_lowerCAmelCase = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(snake_case_ , tokenizer_r.encode , snake_case_ , max_length=snake_case_ , padding='''max_length''' )
# Simple input
self.assertRaises(snake_case_ , tokenizer_r.encode_plus , snake_case_ , max_length=snake_case_ , padding='''max_length''' )
# Simple input
self.assertRaises(
snake_case_ , tokenizer_r.batch_encode_plus , snake_case_ , max_length=snake_case_ , padding='''max_length''' , )
# Pair input
self.assertRaises(snake_case_ , tokenizer_r.encode , snake_case_ , max_length=snake_case_ , padding='''max_length''' )
# Pair input
self.assertRaises(snake_case_ , tokenizer_r.encode_plus , snake_case_ , max_length=snake_case_ , padding='''max_length''' )
# Pair input
self.assertRaises(
snake_case_ , tokenizer_r.batch_encode_plus , snake_case_ , max_length=snake_case_ , padding='''max_length''' , )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' )
# Simple input
_lowerCAmelCase = "This is a simple input"
_lowerCAmelCase = ["This is a simple input looooooooong", "This is a simple input"]
_lowerCAmelCase = ("This is a simple input", "This is a pair")
_lowerCAmelCase = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
_lowerCAmelCase = tokenizer.pad_token_id
_lowerCAmelCase = tokenizer(snake_case_ , padding='''max_length''' , max_length=30 , return_tensors='''np''' )
_lowerCAmelCase = tokenizer(snake_case_ , padding=snake_case_ , truncate=snake_case_ , return_tensors='''np''' )
_lowerCAmelCase = tokenizer(*snake_case_ , padding='''max_length''' , max_length=60 , return_tensors='''np''' )
_lowerCAmelCase = tokenizer(snake_case_ , padding=snake_case_ , truncate=snake_case_ , return_tensors='''np''' )
# s
# test single string max_length padding
self.assertEqual(out_s['''input_ids'''].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['''input_ids'''] )
self.assertTrue(0 in out_s['''attention_mask'''] )
# s2
# test automatic padding
self.assertEqual(out_sa['''input_ids'''].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
self.assertFalse(0 in out_sa['''attention_mask'''][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
self.assertTrue(0 in out_sa['''attention_mask'''][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['''input_ids'''].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['''input_ids'''] )
self.assertTrue(0 in out_p['''attention_mask'''] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['''input_ids'''].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
self.assertFalse(0 in out_pa['''attention_mask'''][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
self.assertTrue(0 in out_pa['''attention_mask'''][1] )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = "$$$"
_lowerCAmelCase = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=snake_case_ , add_bos_token=snake_case_ )
_lowerCAmelCase = "This is a simple input"
_lowerCAmelCase = ["This is a simple input 1", "This is a simple input 2"]
_lowerCAmelCase = tokenizer.bos_token_id
_lowerCAmelCase = tokenizer(snake_case_ )
_lowerCAmelCase = tokenizer(snake_case_ )
self.assertEqual(out_s.input_ids[0] , snake_case_ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_lowerCAmelCase = tokenizer.decode(out_s.input_ids )
_lowerCAmelCase = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , snake_case_ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def __lowerCAmelCase ( self ):
_lowerCAmelCase = CodeGenTokenizer.from_pretrained('''Salesforce/codegen-350M-mono''' )
_lowerCAmelCase = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
_lowerCAmelCase = "\nif len_a > len_b: result = a\nelse: result = b"
_lowerCAmelCase = tokenizer.encode(snake_case_ )
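        # `truncate_before_pattern` cuts the decoded text right before the first
        # match of any of these regexes (a comment marker, the EOS token, a
        # docstring opener, or a run of blank lines).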
_lowerCAmelCase = ["^#", re.escape('''<|endoftext|>''' ), "^'''", "^\"\"\"", "\n\n\n"]
_lowerCAmelCase = tokenizer.decode(snake_case_ , truncate_before_pattern=snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
def __lowerCAmelCase ( self ):
pass
| 721 |
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
UpperCAmelCase_ = {"UserAgent": UserAgent().random}
def extract_user_profile(script) -> dict:
    """
    May raise json.decoder.JSONDecodeError if the embedded blob is malformed.
    """
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
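# Note (added): extract_user_profile above assumes Instagram still embeds the
# profile as a JSON blob inside a <script> tag; the scripts[4]/scripts[3]
# indices used in InstagramUser.get_json below are brittle and may need
# adjusting if the page layout changes.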
class InstagramUser:
    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """
        Return a dict of user information.
        """
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def UpperCAmelCase__ ( username : str = "github" )->None:
    import os
    if os.environ.get('''CI''' ):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username )
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data , dict )
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 1_5_0
    assert instagram_user.number_of_followers > 1_2_0_0_0_0
    assert instagram_user.number_of_followings > 1_5
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith('''https://instagram.''' )
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser("github")
print(instagram_user)
print(F"""{instagram_user.number_of_posts = }""")
print(F"""{instagram_user.number_of_followers = }""")
print(F"""{instagram_user.number_of_followings = }""")
print(F"""{instagram_user.email = }""")
print(F"""{instagram_user.website = }""")
print(F"""{instagram_user.profile_picture_url = }""")
print(F"""{instagram_user.is_verified = }""")
print(F"""{instagram_user.is_private = }""")
| 664 | 0 |
import math
def UpperCAmelCase__ ( apparent_power : float , power_factor : float )->float:
if (
not isinstance(_SCREAMING_SNAKE_CASE , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('''power_factor must be a valid float value between -1 and 1.''' )
return apparent_power * power_factor
def UpperCAmelCase__ ( apparent_power : float , power_factor : float )->float:
if (
not isinstance(_SCREAMING_SNAKE_CASE , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('''power_factor must be a valid float value between -1 and 1.''' )
return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
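# Worked example for the two helpers above (both definitions carry the same
# mangled name; the first computes real power P = S * pf, the second reactive
# power Q = S * sqrt(1 - pf**2)). For an apparent power S = 100 VA at a power
# factor of 0.9:
#   real power:     100 * 0.9                      -> 90.0 W
#   reactive power: 100 * math.sqrt(1 - 0.9 ** 2)  -> ~43.589 VAR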
| 700 |
def find_minimum_change( denominations : list[int] , value : str )->list[int]:
    total_value = int(value )
    # Initialize Result
    answer = []
    # Traverse through all denominations
    for denomination in reversed(denominations ):
        # Find denominations
        while int(total_value ) >= int(denomination ):
            total_value -= int(denomination )
            answer.append(denomination )  # Append to the "answer" array
    return answer
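# Quick trace of the greedy strategy above, using the default Indian currency
# denominations from the driver code below:
#   find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "43")
#   -> [20, 20, 2, 1]   (43 = 20 + 20 + 2 + 1, scanning denominations largest-first)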
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"
    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())
        for i in range(0, n):
            denominations.append(int(input(F"""Denomination {i}: """).strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0]
        value = input("Enter the change you want to make: ").strip()
    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(F"""Following is minimal change for {value}: """)
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
| 664 | 0 |
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class UpperCAmelCase ( SequenceFeatureExtractor ):
    model_input_names = ["input_values", "padding_mask"]
    def __init__( self , feature_size = 1 , sampling_rate = 24_000 , padding_value = 0.0 , chunk_length_s = None , overlap = None , **kwargs , ):
        super().__init__(feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs )
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
@property
    def chunk_length( self ):
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
    def chunk_stride( self ):
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
def __call__( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = False , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
if padding and truncation:
raise ValueError('''Both padding and truncation were set. Make sure you only set one.''' )
elif padding is None:
# by default let's pad the inputs
_lowerCAmelCase = True
_lowerCAmelCase = bool(
isinstance(_lowerCamelCase , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) )
if is_batched:
_lowerCAmelCase = [np.asarray(_lowerCamelCase , dtype=np.floataa ).T for audio in raw_audio]
elif not is_batched and not isinstance(_lowerCamelCase , np.ndarray ):
_lowerCAmelCase = np.asarray(_lowerCamelCase , dtype=np.floataa )
elif isinstance(_lowerCamelCase , np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
_lowerCAmelCase = raw_audio.astype(np.floataa )
# always return batch
if not is_batched:
_lowerCAmelCase = [np.asarray(_lowerCamelCase ).T]
# verify inputs are valid
for idx, example in enumerate(_lowerCamelCase ):
if example.ndim > 2:
raise ValueError(F'''Expected input shape (channels, length) but got shape {example.shape}''' )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(F'''Expected mono audio but example has {example.shape[-1]} channels''' )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(F'''Expected stereo audio but example has {example.shape[-1]} channels''' )
_lowerCAmelCase = None
_lowerCAmelCase = BatchFeature({'''input_values''': raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
_lowerCAmelCase = min(array.shape[0] for array in raw_audio )
_lowerCAmelCase = int(np.floor(max_length / self.chunk_stride ) )
_lowerCAmelCase = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
_lowerCAmelCase = max(array.shape[0] for array in raw_audio )
_lowerCAmelCase = int(np.ceil(max_length / self.chunk_stride ) )
_lowerCAmelCase = (nb_step - 1) * self.chunk_stride + self.chunk_length
_lowerCAmelCase = '''max_length'''
else:
_lowerCAmelCase = input_values
# normal padding on batch
if padded_inputs is None:
_lowerCAmelCase = self.pad(
_lowerCamelCase , max_length=_lowerCamelCase , truncation=_lowerCamelCase , padding=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
if padding:
_lowerCAmelCase = padded_inputs.pop('''attention_mask''' )
_lowerCAmelCase = []
for example in padded_inputs.pop('''input_values''' ):
if self.feature_size == 1:
_lowerCAmelCase = example[..., None]
input_values.append(example.T )
_lowerCAmelCase = input_values
if return_tensors is not None:
_lowerCAmelCase = padded_inputs.convert_to_tensors(_lowerCamelCase )
return padded_inputs
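# A minimal usage sketch for the feature extractor above. The class name is
# mangled here; this assumes the usual EncodecFeatureExtractor-style interface
# (the ``EncodecFeatureExtractor`` name and the exact output shape are assumptions):
#   import numpy as np
#   fe = EncodecFeatureExtractor(feature_size=1, sampling_rate=24_000)
#   audio = np.zeros(24_000, dtype=np.float32)           # 1 second of mono audio
#   batch = fe(audio, sampling_rate=24_000, return_tensors="np")
#   batch["input_values"]                                # (batch, channels, samples)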
| 701 |
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , albert_config_file , pytorch_dump_path )->None:
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file )
    print(f'''Building PyTorch model from configuration: {config}''' )
    model = AlbertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--albert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained ALBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
UpperCAmelCase_ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
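# Example invocation (the script filename and all paths are placeholders):
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./albert_base/model.ckpt-best \
#     --albert_config_file ./albert_base/albert_config.json \
#     --pytorch_dump_path ./albert_base/pytorch_model.bin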
| 664 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively( hf_pointer , key , value , full_name , weight_type , is_finetuned )->None:
    for attribute in key.split('''.''' ):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return
            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = '''lm_head'''
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
        f''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def recursively_load_weights( fairseq_model , hf_model , is_finetuned )->None:
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == '''group''' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = '''unispeech.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('''.''' )[-2]
                        mapped_key = mapped_key.replace('''*''' , layer_index )
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = '''weight'''
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type , is_finetuned )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f'''Unused weights: {unused_weights}''' )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm )->None:
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_unispeech_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True )->None:
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path )
    else:
        config = UniSpeechConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , '''vocab.json''' )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict['''<pad>'''] = 4_2
            vocab_dict['''<s>'''] = 4_3
            with open(vocab_path , '''w''' , encoding='''utf-8''' ) as vocab_handle:
                json.dump(vocab_dict , vocab_handle )
            tokenizer = WavaVecaPhonemeCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == '''layer''' else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_unispeech = UniSpeechForCTC(config )
    else:
        hf_unispeech = UniSpeechForPreTraining(config )
    if is_finetuned:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ), '''w2v_path''': checkpoint_path} )
    else:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
    model = model[0].eval()
    recursively_load_weights(model , hf_unispeech , is_finetuned )
    hf_unispeech.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
UpperCAmelCase_ = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
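# Example invocation (the script filename and all paths are placeholders):
#   python convert_unispeech_original_pytorch_checkpoint_to_pytorch.py \
#     --checkpoint_path ./unispeech.pt \
#     --dict_path ./dict.ltr.txt \
#     --config_path ./config.json \
#     --pytorch_dump_folder_path ./unispeech-converted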
| 702 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
raise Exception("requires fairseq >= 1.0.0a")
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = "Hello world! cécé herlolip"
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : bool )->List[Any]:
_lowerCAmelCase = FairseqRobertaModel.from_pretrained(_SCREAMING_SNAKE_CASE )
roberta.eval() # disable dropout
_lowerCAmelCase = roberta.model.encoder.sentence_encoder
_lowerCAmelCase = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1e-5 , )
if classification_head:
_lowerCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our RoBERTa config:''' , _SCREAMING_SNAKE_CASE )
_lowerCAmelCase = XLMRobertaXLForSequenceClassification(_SCREAMING_SNAKE_CASE ) if classification_head else XLMRobertaXLForMaskedLM(_SCREAMING_SNAKE_CASE )
model.eval()
# Now let's copy all the weights.
# Embeddings
_lowerCAmelCase = roberta_sent_encoder.embed_tokens.weight
_lowerCAmelCase = roberta_sent_encoder.embed_positions.weight
_lowerCAmelCase = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
_lowerCAmelCase = roberta_sent_encoder.layer_norm.weight
_lowerCAmelCase = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
_lowerCAmelCase = model.roberta.encoder.layer[i]
_lowerCAmelCase = roberta_sent_encoder.layers[i]
_lowerCAmelCase = layer.attention
_lowerCAmelCase = roberta_layer.self_attn_layer_norm.weight
_lowerCAmelCase = roberta_layer.self_attn_layer_norm.bias
# self attention
_lowerCAmelCase = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
_lowerCAmelCase = roberta_layer.self_attn.q_proj.weight
_lowerCAmelCase = roberta_layer.self_attn.q_proj.bias
_lowerCAmelCase = roberta_layer.self_attn.k_proj.weight
_lowerCAmelCase = roberta_layer.self_attn.k_proj.bias
_lowerCAmelCase = roberta_layer.self_attn.v_proj.weight
_lowerCAmelCase = roberta_layer.self_attn.v_proj.bias
# self-attention output
_lowerCAmelCase = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
_lowerCAmelCase = roberta_layer.self_attn.out_proj.weight
_lowerCAmelCase = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
_lowerCAmelCase = roberta_layer.final_layer_norm.weight
_lowerCAmelCase = roberta_layer.final_layer_norm.bias
# intermediate
_lowerCAmelCase = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
_lowerCAmelCase = roberta_layer.fca.weight
_lowerCAmelCase = roberta_layer.fca.bias
# output
_lowerCAmelCase = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
_lowerCAmelCase = roberta_layer.fca.weight
_lowerCAmelCase = roberta_layer.fca.bias
# end of layer
if classification_head:
_lowerCAmelCase = roberta.model.classification_heads['''mnli'''].dense.weight
_lowerCAmelCase = roberta.model.classification_heads['''mnli'''].dense.bias
_lowerCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.weight
_lowerCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
_lowerCAmelCase = roberta.model.encoder.lm_head.dense.weight
_lowerCAmelCase = roberta.model.encoder.lm_head.dense.bias
_lowerCAmelCase = roberta.model.encoder.lm_head.layer_norm.weight
_lowerCAmelCase = roberta.model.encoder.lm_head.layer_norm.bias
_lowerCAmelCase = roberta.model.encoder.lm_head.weight
_lowerCAmelCase = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
_lowerCAmelCase = roberta.encode(_SCREAMING_SNAKE_CASE ).unsqueeze(0 ) # batch of size 1
_lowerCAmelCase = model(_SCREAMING_SNAKE_CASE )[0]
if classification_head:
_lowerCAmelCase = roberta.model.classification_heads['''mnli'''](roberta.extract_features(_SCREAMING_SNAKE_CASE ) )
else:
_lowerCAmelCase = roberta.model(_SCREAMING_SNAKE_CASE )[0]
print(our_output.shape , their_output.shape )
_lowerCAmelCase = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
_lowerCAmelCase = torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 )
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
if not success:
raise Exception('''Something went wRoNg''' )
pathlib.Path(_SCREAMING_SNAKE_CASE ).mkdir(parents=_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
UpperCAmelCase_ = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
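# Example invocation (the script filename and all paths are placeholders):
#   python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
#     --roberta_checkpoint_path ./xlmr.xl \
#     --pytorch_dump_folder_path ./xlm-roberta-xl-converted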
| 664 | 0 |
import base64
def base85_encode( string : str )->bytes:
    return base64.b85encode(string.encode('''utf-8''' ) )
def base85_decode( a_encoded : bytes )->str:
    return base64.b85decode(a_encoded ).decode('''utf-8''' )
if __name__ == "__main__":
    test = "Hello World!"
    encoded = base85_encode(test)
    print(encoded)
    decoded = base85_decode(encoded)
    print(decoded)
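# Round-trip sanity check for the helpers above:
#   base85_decode(base85_encode("Hello World!")) == "Hello World!"   # True
# ``base64.b85encode`` returns bytes, so ``print(encoded)`` shows a bytes
# literal rather than a plain string.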
| 703 |
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput( BaseOutput ):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar( num_diffusion_timesteps , max_beta=0.999 , alpha_transform_type="cosine" , )->torch.Tensor:
    if alpha_transform_type == "cosine":
        def alpha_bar_fn( t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn( t ):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
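# Example: betas_for_alpha_bar(1_000) returns a 1-D tensor of 1_000 betas derived
# from the cosine alpha-bar schedule, each capped at ``max_beta`` (0.999 by default):
#   betas = betas_for_alpha_bar(1_000)
#   betas.shape   # torch.Size([1000])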
class DDIMInverseScheduler( SchedulerMixin , ConfigMixin ):
    order = 1
@register_to_config
    def __init__( self , num_train_timesteps = 1_000 , beta_start = 0.0_001 , beta_end = 0.02 , beta_schedule = "linear" , trained_betas = None , clip_sample = True , set_alpha_to_zero = True , steps_offset = 0 , prediction_type = "epsilon" , clip_sample_range = 1.0 , **kwargs , ):
        if kwargs.get('''set_alpha_to_one''' , None ) is not None:
            deprecation_message = (
                '''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'''
            )
            deprecate('''set_alpha_to_one''' , '''1.0.0''' , deprecation_message , standard_warn=False )
            set_alpha_to_zero = kwargs['''set_alpha_to_one''']
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas , dtype=torch.float32 )
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start , beta_end , num_train_timesteps , dtype=torch.float32 )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , num_train_timesteps , dtype=torch.float32 ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps )
        else:
            raise NotImplementedError(F'''{beta_schedule} is not implemented for {self.__class__}''' )
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas , dim=0 )
        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0 , num_train_timesteps ).copy().astype(np.int64 ) )
    def scale_model_input( self , sample , timestep = None ):
        return sample
    def set_timesteps( self , num_inference_steps , device = None ):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                F'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'''
                F''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'''
                F''' maximal {self.config.num_train_timesteps} timesteps.''' )
        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0 , num_inference_steps ) * step_ratio).round().copy().astype(np.int64 )
        self.timesteps = torch.from_numpy(timesteps ).to(device )
        self.timesteps += self.config.steps_offset
    def step( self , model_output , timestep , sample , eta = 0.0 , use_clipped_model_output = False , variance_noise = None , return_dict = True , ):
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps
        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )
        beta_prod_t = 1 - alpha_prod_t
        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'''
                ''' `v_prediction`''' )
        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range , self.config.clip_sample_range )
        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample , pred_original_sample=pred_original_sample )
def __len__( self ):
return self.config.num_train_timesteps
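# A minimal usage sketch for the inverse scheduler above (``unet`` stands in for
# any model that predicts epsilon; the step count and shapes are illustrative):
#   scheduler = DDIMInverseScheduler(num_train_timesteps=1_000)
#   scheduler.set_timesteps(50)
#   for t in scheduler.timesteps:
#       noise_pred = unet(sample, t)  # user-supplied epsilon prediction
#       sample = scheduler.step(noise_pred, t, sample).prev_sample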
| 664 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
UpperCAmelCase_ = logging.get_logger(__name__)
def squared_euclidean_distance( a , b )->np.ndarray:
    b = b.T
    aa = np.sum(np.square(a ) , axis=1 )
    ba = np.sum(np.square(b ) , axis=0 )
    ab = np.matmul(a , b )
    d = aa[:, None] - 2 * ab + ba[None, :]
    return d
def color_quantize( x , clusters )->np.ndarray:
    x = x.reshape(-1 , 3 )
    d = squared_euclidean_distance(x , clusters )
    return np.argmin(d , axis=1 )
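# Tiny worked example: with clusters [[0, 0, 0], [255, 255, 255]], an almost-black
# pixel maps to cluster 0 and an almost-white one to cluster 1:
#   clusters = np.array([[0, 0, 0], [255, 255, 255]])
#   x = np.array([[[10, 10, 10], [250, 250, 250]]])    # one 1x2 RGB image
#   color_quantize(x, clusters)                        # -> array([0, 1])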
class UpperCAmelCase ( BaseImageProcessor ):
    model_input_names = ['''pixel_values''']
def __init__( self , _lowerCAmelCase = None , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = PILImageResampling.BILINEAR , _lowerCAmelCase = True , _lowerCAmelCase = True , **_lowerCAmelCase , ):
super().__init__(**lowercase_ )
_lowerCAmelCase = size if size is not None else {"height": 256, "width": 256}
_lowerCAmelCase = get_size_dict(lowercase_ )
_lowerCAmelCase = np.array(lowercase_ ) if clusters is not None else None
_lowerCAmelCase = do_resize
_lowerCAmelCase = size
_lowerCAmelCase = resample
_lowerCAmelCase = do_normalize
_lowerCAmelCase = do_color_quantize
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = PILImageResampling.BILINEAR , _lowerCAmelCase = None , **_lowerCAmelCase , ):
_lowerCAmelCase = get_size_dict(lowercase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'''Size dictionary must contain both height and width keys. Got {size.keys()}''' )
return resize(
lowercase_ , size=(size['''height'''], size['''width''']) , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None , ):
_lowerCAmelCase = rescale(image=lowercase_ , scale=1 / 127.5 , data_format=lowercase_ )
_lowerCAmelCase = image - 1
return image
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = ChannelDimension.FIRST , **_lowerCAmelCase , ):
_lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
_lowerCAmelCase = size if size is not None else self.size
_lowerCAmelCase = get_size_dict(lowercase_ )
_lowerCAmelCase = resample if resample is not None else self.resample
_lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_lowerCAmelCase = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
_lowerCAmelCase = clusters if clusters is not None else self.clusters
_lowerCAmelCase = np.array(lowercase_ )
_lowerCAmelCase = make_list_of_images(lowercase_ )
if not valid_images(lowercase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_color_quantize and clusters is None:
raise ValueError('''Clusters must be specified if do_color_quantize is True.''' )
# All transformations expect numpy arrays.
_lowerCAmelCase = [to_numpy_array(lowercase_ ) for image in images]
if do_resize:
_lowerCAmelCase = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images]
if do_normalize:
_lowerCAmelCase = [self.normalize(image=lowercase_ ) for image in images]
if do_color_quantize:
_lowerCAmelCase = [to_channel_dimension_format(lowercase_ , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
_lowerCAmelCase = np.array(lowercase_ )
_lowerCAmelCase = color_quantize(lowercase_ , lowercase_ ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
_lowerCAmelCase = images.shape[0]
_lowerCAmelCase = images.reshape(lowercase_ , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
_lowerCAmelCase = list(lowercase_ )
else:
_lowerCAmelCase = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images]
_lowerCAmelCase = {"input_ids": images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
| 704 |
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
"tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
"CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
"CpmAntForCausalLM",
"CpmAntModel",
"CpmAntPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
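# Sketch of what the lazy-module pattern above buys: the torch-backed modeling
# module is only imported when one of its attributes is first accessed, e.g.
#   from transformers.models.cpmant import CpmAntModel   # triggers the real import
# while ``import transformers.models.cpmant`` alone stays cheap.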
| 664 | 0 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def replace_key( key )->str:
if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 1_0:
_lowerCAmelCase = key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' )
elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 1_0:
_lowerCAmelCase = key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' )
elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 1_0:
_lowerCAmelCase = key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' )
elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 1_0:
_lowerCAmelCase = key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' )
if "conditioner_blocks.0." in key:
_lowerCAmelCase = key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' )
if "prime_prior" in key:
_lowerCAmelCase = key.replace('''prime_prior''' , '''encoder''' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
_lowerCAmelCase = key.replace('''.emb.''' , '''.''' )
if key.endswith('''k''' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('''.k''' , '''.codebook''' )
if "y_emb." in key:
return key.replace('''y_emb.''' , '''metadata_embedding.''' )
if "x_emb.emb." in key:
_lowerCAmelCase = key.replace('''0.x_emb.emb''' , '''embed_tokens''' )
if "prime_state_ln" in key:
return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' )
if ".ln" in key:
return key.replace('''.ln''' , '''.layer_norm''' )
if "_ln" in key:
return key.replace('''_ln''' , '''_layer_norm''' )
if "prime_state_proj" in key:
return key.replace('''prime_state_proj''' , '''encoder.proj_in''' )
if "prime_x_out" in key:
return key.replace('''prime_x_out''' , '''encoder.lm_head''' )
if "prior.x_out" in key:
return key.replace('''x_out''' , '''fc_proj_out''' )
if "x_emb" in key:
return key.replace('''x_emb''' , '''embed_tokens''' )
return key
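# Worked example for replace_key: checkpoint keys are rewritten to the HF naming
# scheme, e.g. ``replace_key("prior.x_out.weight")`` falls through to the
# ``prior.x_out`` branch and returns ``"prior.fc_proj_out.weight"``.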
def fix_jukebox_keys( state_dict , model_state_dict , key_prefix , mapping )->dict:
    new_dict = {}
    import re
    re_encoder_block_conv_in = re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
    re_encoder_block_resnet = re.compile(
        r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
    re_encoder_block_proj_out = re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
    re_decoder_block_conv_out = re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
    re_decoder_block_resnet = re.compile(
        r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
    re_decoder_block_proj_in = re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
    re_prior_cond_conv_out = re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' )
    re_prior_cond_resnet = re.compile(
        r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
    re_prior_cond_proj_in = re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' )
    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key ):
            regex_match = re_encoder_block_conv_in.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] )
            re_new_key = f'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'''
            key = re_encoder_block_conv_in.sub(re_new_key , original_key )
        elif re_encoder_block_resnet.fullmatch(original_key ):
            regex_match = re_encoder_block_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] )
            conv_index = {'''1''': 1, '''3''': 2}[groups[-2]]
            prefix = f'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'''
            resnet_block = f'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key , original_key )
        elif re_encoder_block_proj_out.fullmatch(original_key ):
            regex_match = re_encoder_block_proj_out.match(original_key )
            groups = regex_match.groups()
            re_new_key = f'''encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'''
            key = re_encoder_block_proj_out.sub(re_new_key , original_key )
        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key ):
            regex_match = re_decoder_block_conv_out.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] ) - 2
            re_new_key = f'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'''
            key = re_decoder_block_conv_out.sub(re_new_key , original_key )
        elif re_decoder_block_resnet.fullmatch(original_key ):
            regex_match = re_decoder_block_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] ) - 2
            conv_index = {'''1''': 1, '''3''': 2}[groups[-2]]
            prefix = f'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'''
            resnet_block = f'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key , original_key )
        elif re_decoder_block_proj_in.fullmatch(original_key ):
            regex_match = re_decoder_block_proj_in.match(original_key )
            groups = regex_match.groups()
            re_new_key = f'''decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'''
            key = re_decoder_block_proj_in.sub(re_new_key , original_key )
        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key ):
            regex_match = re_prior_cond_conv_out.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[1] ) * 2 + int(groups[2] ) - 2
            re_new_key = f'''conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'''
            key = re_prior_cond_conv_out.sub(re_new_key , original_key )
        elif re_prior_cond_resnet.fullmatch(original_key ):
            regex_match = re_prior_cond_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[1] ) * 2 + int(groups[2] ) - 2
            conv_index = {'''1''': 1, '''3''': 2}[groups[-2]]
            prefix = f'''conditioner_blocks.upsampler.upsample_block.{block_index}.'''
            resnet_block = f'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key , original_key )
        elif re_prior_cond_proj_in.fullmatch(original_key ):
            regex_match = re_prior_cond_proj_in.match(original_key )
            groups = regex_match.groups()
            re_new_key = f'''conditioner_blocks.upsampler.proj_in.{groups[-1]}'''
            key = re_prior_cond_proj_in.sub(re_new_key , original_key )
        # keep original key
        else:
            key = original_key
        key = replace_key(key )
        if f'''{key_prefix}.{key}''' not in model_state_dict or key is None:
            print(f'''failed converting {original_key} to {key}, does not match''' )
        # handle missmatched shape
        elif value.shape != model_state_dict[f'''{key_prefix}.{key}'''].shape:
            val = model_state_dict[f'''{key_prefix}.{key}''']
            print(f'''{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match''' )
            key = original_key
        mapping[key] = original_key
        new_dict[key] = value
    return new_dict
@torch.no_grad()
def convert_openai_checkpoint( model_name=None , pytorch_dump_folder_path=None )->list:
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f'''{pytorch_dump_folder_path}/{file.split("/" )[-1]}''' ):
            r = requests.get(f'''{PREFIX}{file}''' , allow_redirects=True )
            os.makedirs(f'''{pytorch_dump_folder_path}/''' , exist_ok=True )
            open(f'''{pytorch_dump_folder_path}/{file.split("/" )[-1]}''' , '''wb''' ).write(r.content )
    to_convert = MODEL_MAPPING[model_name.split('''/''' )[-1]]
    config = JukeboxConfig.from_pretrained(model_name )
    model = JukeboxModel(config )
    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(to_convert ):
        old_dic = torch.load(f'''{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}''' )['''model''']
        new_dic = {}
        for k in old_dic.keys():
            if k.endswith('''.b''' ):
                new_dic[k.replace('''b''' , '''bias''' )] = old_dic[k]
            elif k.endswith('''.w''' ):
                new_dic[k.replace('''w''' , '''weight''' )] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace('''.blocks.''' , '''.model.''' )] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]
        key_prefix = '''vqvae''' if i == 0 else f'''priors.{3 - i}'''
        new_dic = fix_jukebox_keys(new_dic , model.state_dict() , key_prefix , mapping )
        weight_dict.append(new_dic )
    vqvae_state_dict = weight_dict.pop(0 )
    model.vqvae.load_state_dict(vqvae_state_dict )
    for i in range(len(weight_dict ) ):
        model.priors[i].load_state_dict(weight_dict[2 - i] )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    with open(f'''{pytorch_dump_folder_path}/mapping.json''' , '''w''' ) as txtfile:
        json.dump(mapping , txtfile )
    print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    return weight_dict
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
UpperCAmelCase_ = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 705 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase ( ProcessorMixin ):
    feature_extractor_class = '''ClapFeatureExtractor'''
    tokenizer_class = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
    def __init__( self , feature_extractor , tokenizer ):
        super().__init__(feature_extractor , tokenizer )
    def __call__( self , text=None , audios=None , return_tensors=None , **kwargs ):
        sampling_rate = kwargs.pop('''sampling_rate''' , None )
        if text is None and audios is None:
            raise ValueError('''You have to specify either text or audios. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if audios is not None:
            audio_features = self.feature_extractor(
                audios , sampling_rate=sampling_rate , return_tensors=return_tensors , **kwargs )
        if text is not None and audios is not None:
            encoding['''input_features'''] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
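# Hedged usage sketch for the processor above (``ClapProcessor.from_pretrained``
# and the checkpoint name are assumptions for illustration):
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   inputs = processor(text=["a dog barking"], audios=audio_array,
#                      sampling_rate=48_000, return_tensors="pt")
#   # -> token ids from the Roberta tokenizer plus ``input_features`` from the
#   #    feature extractor, merged into one encoding.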
| 664 | 0 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
UpperCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class UpperCAmelCase ( Pipeline ):
def __init__( self , **_lowerCAmelCase ):
super().__init__(**_lowerCAmelCase )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
    def __call__( self , images , **kwargs ):
        return super().__call__(images , **kwargs )
    def _sanitize_parameters( self , **kwargs ):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
    def preprocess( self , image , candidate_labels=None , hypothesis_template="This is a photo of {}." ):
        image = load_image(image )
        inputs = self.image_processor(images=[image] , return_tensors=self.framework )
        inputs['''candidate_labels'''] = candidate_labels
        sequences = [hypothesis_template.format(x ) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences , return_tensors=self.framework , padding=True )
        inputs['''text_inputs'''] = [text_inputs]
        return inputs
    def _forward( self , model_inputs ):
        candidate_labels = model_inputs.pop('''candidate_labels''' )
        text_inputs = model_inputs.pop('''text_inputs''' )
        if isinstance(text_inputs[0] , UserDict ):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs , **model_inputs )
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs
    def postprocess( self , model_outputs ):
        candidate_labels = model_outputs.pop('''candidate_labels''' )
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1 ).squeeze(-1 )
            scores = probs.tolist()
            if not isinstance(scores , list ):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits , axis=-1 )
            scores = probs.numpy().tolist()
        else:
            raise ValueError(F'''Unsupported framework: {self.framework}''' )
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels ) , key=lambda x : -x[0] )
        ]
        return result
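# Hedged usage sketch (pipeline task name, image path, and the example scores
# are illustrative):
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification")
#   classifier("cat.png", candidate_labels=["cat", "dog"])
#   # -> [{"score": 0.98, "label": "cat"}, {"score": 0.02, "label": "dog"}]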
| 706 |
from __future__ import annotations
def bucket_sort( my_list : list )->list:
    if len(my_list ) == 0:
        return []
    min_value , max_value = min(my_list ), max(my_list )
    bucket_count = int(max_value - min_value ) + 1
    buckets = [[] for _ in range(bucket_count )]
    for i in my_list:
        buckets[int(i - min_value )].append(i )
    return [v for bucket in buckets for v in sorted(bucket )]
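# Trace for bucket_sort([4, 5, 3, 2, 1]): min_value=1 and max_value=5, so five
# buckets are created; each element i lands in bucket int(i - 1), giving
# [[1], [2], [3], [4], [5]], which flattens back to [1, 2, 3, 4, 5].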
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
| 664 | 0 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"text-classification",
"language-modeling",
"summarization",
"token-classification",
"question-answering",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
UpperCAmelCase_ = logging.getLogger()
def UpperCAmelCase__ ( )->Optional[int]:
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('''-f''' )
_lowerCAmelCase = parser.parse_args()
return args.f
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : str="eval" )->str:
_lowerCAmelCase = os.path.join(_SCREAMING_SNAKE_CASE , f'''{split}_results.json''' )
if os.path.exists(_SCREAMING_SNAKE_CASE ):
with open(_SCREAMING_SNAKE_CASE , '''r''' ) as f:
return json.load(_SCREAMING_SNAKE_CASE )
raise ValueError(f'''can\'t find {path}''' )
UpperCAmelCase_ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class UpperCAmelCase ( snake_case_ ):
'''simple docstring'''
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.get_auto_remove_tmp_dir()
_lowerCAmelCase = F'''\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '''.split()
with patch.object(__UpperCamelCase , '''argv''' , __UpperCamelCase ):
run_flax_glue.main()
_lowerCAmelCase = get_results(__UpperCamelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.get_auto_remove_tmp_dir()
_lowerCAmelCase = F'''\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n '''.split()
with patch.object(__UpperCamelCase , '''argv''' , __UpperCamelCase ):
run_clm_flax.main()
_lowerCAmelCase = get_results(__UpperCamelCase )
self.assertLess(result['''eval_perplexity'''] , 100 )
@slow
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.get_auto_remove_tmp_dir()
_lowerCAmelCase = F'''\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n '''.split()
with patch.object(__UpperCamelCase , '''argv''' , __UpperCamelCase ):
run_summarization_flax.main()
_lowerCAmelCase = get_results(__UpperCamelCase , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 10 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.get_auto_remove_tmp_dir()
_lowerCAmelCase = F'''\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n '''.split()
with patch.object(__UpperCamelCase , '''argv''' , __UpperCamelCase ):
run_mlm_flax.main()
_lowerCAmelCase = get_results(__UpperCamelCase )
self.assertLess(result['''eval_perplexity'''] , 42 )
@slow
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.get_auto_remove_tmp_dir()
_lowerCAmelCase = F'''\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n '''.split()
with patch.object(__UpperCamelCase , '''argv''' , __UpperCamelCase ):
run_ta_mlm_flax.main()
_lowerCAmelCase = get_results(__UpperCamelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
def __lowerCAmelCase ( self ):
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
_lowerCAmelCase = 7 if get_gpu_count() > 1 else 2
_lowerCAmelCase = self.get_auto_remove_tmp_dir()
_lowerCAmelCase = F'''\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n '''.split()
with patch.object(__UpperCamelCase , '''argv''' , __UpperCamelCase ):
run_flax_ner.main()
_lowerCAmelCase = get_results(__UpperCamelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.get_auto_remove_tmp_dir()
_lowerCAmelCase = F'''\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n '''.split()
with patch.object(__UpperCamelCase , '''argv''' , __UpperCamelCase ):
run_qa.main()
_lowerCAmelCase = get_results(__UpperCamelCase )
self.assertGreaterEqual(result['''eval_f1'''] , 30 )
self.assertGreaterEqual(result['''eval_exact'''] , 30 )
| 707 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
UpperCAmelCase_ = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def UpperCAmelCase__ ( )->Any:
_lowerCAmelCase = _ask_options(
'''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
_lowerCAmelCase = get_sagemaker_input()
else:
_lowerCAmelCase = get_cluster_input()
return config
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : int=None )->str:
if subparsers is not None:
_lowerCAmelCase = subparsers.add_parser('''config''' , description=_SCREAMING_SNAKE_CASE )
else:
_lowerCAmelCase = argparse.ArgumentParser('''Accelerate config command''' , description=_SCREAMING_SNAKE_CASE )
parser.add_argument(
'''--config_file''' , default=_SCREAMING_SNAKE_CASE , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , )
if subparsers is not None:
parser.set_defaults(func=_SCREAMING_SNAKE_CASE )
return parser
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict )->str:
_lowerCAmelCase = get_user_input()
if args.config_file is not None:
_lowerCAmelCase = args.config_file
else:
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
os.makedirs(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = default_yaml_config_file
if config_file.endswith('''.json''' ):
config.to_json_file(_SCREAMING_SNAKE_CASE )
else:
config.to_yaml_file(_SCREAMING_SNAKE_CASE )
print(f'''accelerate configuration saved at {config_file}''' )
def UpperCAmelCase__ ( )->List[Any]:
_lowerCAmelCase = config_command_parser()
_lowerCAmelCase = parser.parse_args()
config_command(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
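# Hedged CLI sketch (assumption: the standard `accelerate` console entry point dispatches
# to config_command_parser()/config_command() above):
#
#   accelerate config --config_file ./default_config.yaml
#
# walks the interactive prompts and writes the chosen answers to the given YAML (or JSON) file.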
| 664 | 0 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
UpperCAmelCase_ = {
"tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
"tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
"base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
"base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
"small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
"small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
"medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
"medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
"large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
"large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any )->List[str]:
_lowerCAmelCase = ["""layers""", """blocks"""]
for k in ignore_keys:
state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = {
"blocks": "layers",
"mlp.0": "fc1",
"mlp.2": "fc2",
"mlp_ln": "final_layer_norm",
".attn.query": ".self_attn.q_proj",
".attn.key": ".self_attn.k_proj",
".attn.value": ".self_attn.v_proj",
".attn_ln": ".self_attn_layer_norm",
".attn.out": ".self_attn.out_proj",
".cross_attn.query": ".encoder_attn.q_proj",
".cross_attn.key": ".encoder_attn.k_proj",
".cross_attn.value": ".encoder_attn.v_proj",
".cross_attn_ln": ".encoder_attn_layer_norm",
".cross_attn.out": ".encoder_attn.out_proj",
"decoder.ln.": "decoder.layer_norm.",
"encoder.ln.": "encoder.layer_norm.",
"token_embedding": "embed_tokens",
"encoder.positional_embedding": "encoder.embed_positions.weight",
"decoder.positional_embedding": "decoder.embed_positions.weight",
"ln_post": "layer_norm",
}
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] )->Optional[Any]:
_lowerCAmelCase = list(s_dict.keys() )
for key in keys:
_lowerCAmelCase = key
for k, v in WHISPER_MAPPING.items():
if k in key:
_lowerCAmelCase = new_key.replace(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
print(f'''{key} -> {new_key}''' )
_lowerCAmelCase = s_dict.pop(_SCREAMING_SNAKE_CASE )
return s_dict
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[Any] )->Optional[Any]:
_lowerCAmelCase = emb.weight.shape
_lowerCAmelCase = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = emb.weight.data
return lin_layer
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str )->bytes:
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = os.path.basename(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = url.split('''/''' )[-2]
_lowerCAmelCase = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if os.path.exists(_SCREAMING_SNAKE_CASE ) and not os.path.isfile(_SCREAMING_SNAKE_CASE ):
raise RuntimeError(f'''{download_target} exists and is not a regular file''' )
if os.path.isfile(_SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = open(_SCREAMING_SNAKE_CASE , '''rb''' ).read()
if hashlib.sha256(_SCREAMING_SNAKE_CASE ).hexdigest() == expected_sha256:
return model_bytes
else:
warnings.warn(f'''{download_target} exists, but the SHA256 checksum does not match; re-downloading the file''' )
with urllib.request.urlopen(_SCREAMING_SNAKE_CASE ) as source, open(_SCREAMING_SNAKE_CASE , '''wb''' ) as output:
with tqdm(
total=int(source.info().get('''Content-Length''' ) ) , ncols=8_0 , unit='''iB''' , unit_scale=_SCREAMING_SNAKE_CASE , unit_divisor=1_0_2_4 ) as loop:
while True:
_lowerCAmelCase = source.read(8_1_9_2 )
if not buffer:
break
output.write(_SCREAMING_SNAKE_CASE )
loop.update(len(_SCREAMING_SNAKE_CASE ) )
_lowerCAmelCase = open(_SCREAMING_SNAKE_CASE , '''rb''' ).read()
if hashlib.sha256(_SCREAMING_SNAKE_CASE ).hexdigest() != expected_sha256:
raise RuntimeError(
'''Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.''' )
return model_bytes
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Any )->Optional[Any]:
if ".pt" not in checkpoint_path:
_lowerCAmelCase = _download(_MODELS[checkpoint_path] )
else:
_lowerCAmelCase = torch.load(_SCREAMING_SNAKE_CASE , map_location='''cpu''' )
_lowerCAmelCase = original_checkpoint["""dims"""]
_lowerCAmelCase = original_checkpoint["""model_state_dict"""]
_lowerCAmelCase = state_dict["""decoder.token_embedding.weight"""]
remove_ignore_keys_(_SCREAMING_SNAKE_CASE )
rename_keys(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = True
_lowerCAmelCase = state_dict["""decoder.layers.0.fc1.weight"""].shape[0]
_lowerCAmelCase = WhisperConfig(
vocab_size=dimensions['''n_vocab'''] , encoder_ffn_dim=_SCREAMING_SNAKE_CASE , decoder_ffn_dim=_SCREAMING_SNAKE_CASE , num_mel_bins=dimensions['''n_mels'''] , d_model=dimensions['''n_audio_state'''] , max_target_positions=dimensions['''n_text_ctx'''] , encoder_layers=dimensions['''n_audio_layer'''] , encoder_attention_heads=dimensions['''n_audio_head'''] , decoder_layers=dimensions['''n_text_layer'''] , decoder_attention_heads=dimensions['''n_text_head'''] , max_source_positions=dimensions['''n_audio_ctx'''] , )
_lowerCAmelCase = WhisperForConditionalGeneration(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = model.model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0 and not set(_SCREAMING_SNAKE_CASE ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
'''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
f''' but all the following weights are missing {missing}''' )
if tie_embeds:
_lowerCAmelCase = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
_lowerCAmelCase = proj_out_weights
model.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# # Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Patht to the downloaded checkpoints")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
UpperCAmelCase_ = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 708 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
UpperCAmelCase_ = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
UpperCAmelCase_ = 1_0
UpperCAmelCase_ = 2_5_6
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] )->Optional[MinHash]:
if len(_SCREAMING_SNAKE_CASE ) < MIN_NUM_TOKENS:
return None
_lowerCAmelCase = MinHash(num_perm=_SCREAMING_SNAKE_CASE )
for token in set(_SCREAMING_SNAKE_CASE ):
min_hash.update(token.encode() )
return min_hash
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str )->Set[str]:
return {t for t in NON_ALPHA.split(_SCREAMING_SNAKE_CASE ) if len(t.strip() ) > 0}
class UpperCAmelCase :
def __init__( self , *,
_lowerCAmelCase = 0.85 , ):
_lowerCAmelCase = duplication_jaccard_threshold
_lowerCAmelCase = NUM_PERM
_lowerCAmelCase = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
_lowerCAmelCase = defaultdict(_lowerCAmelCase )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = self._index.query(_lowerCAmelCase )
if code_key in self._index.keys:
print(F'''Duplicate key {code_key}''' )
return
self._index.insert(_lowerCAmelCase , _lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(_lowerCAmelCase )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = []
for base, duplicates in self._duplicate_clusters.items():
_lowerCAmelCase = [base] + list(_lowerCAmelCase )
# reformat the cluster to be a list of dict
_lowerCAmelCase = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster]
duplicate_clusters.append(_lowerCAmelCase )
return duplicate_clusters
def __lowerCAmelCase ( self , _lowerCAmelCase ):
_lowerCAmelCase = self.get_duplicate_clusters()
with open(_lowerCAmelCase , '''w''' ) as f:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str )->Optional[Any]:
_lowerCAmelCase , _lowerCAmelCase = element
_lowerCAmelCase = get_min_hash([t for t in NON_ALPHA.split(data['''content'''] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Type[Dataset] )->Any:
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(_SCREAMING_SNAKE_CASE , max_queue_size=1_0_0_0_0 ) , chunksize=1_0_0 , ):
if data is not None:
yield data
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Type[Dataset] , _SCREAMING_SNAKE_CASE : float )->str:
_lowerCAmelCase = DuplicationIndex(duplication_jaccard_threshold=_SCREAMING_SNAKE_CASE )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(_SCREAMING_SNAKE_CASE ) ) , max_queue_size=1_0_0 ) ):
di.add(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str )->float:
_lowerCAmelCase = get_tokens(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = get_tokens(_SCREAMING_SNAKE_CASE )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
UpperCAmelCase_ = None
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Any )->List[Any]:
_lowerCAmelCase = []
for elementa in cluster:
_lowerCAmelCase = _shared_dataset[elementa['''base_index''']]['''content''']
for elementa in extremes:
_lowerCAmelCase = _shared_dataset[elementa['''base_index''']]['''content''']
if jaccard_similarity(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
_lowerCAmelCase = 1
extremes.append(_SCREAMING_SNAKE_CASE )
return extremes
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : str )->Tuple:
global _shared_dataset
_lowerCAmelCase = dataset
_lowerCAmelCase = []
_lowerCAmelCase = partial(_find_cluster_extremes_shared , jaccard_threshold=_SCREAMING_SNAKE_CASE )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) , total=len(_SCREAMING_SNAKE_CASE ) , ):
extremes_list.append(_SCREAMING_SNAKE_CASE )
return extremes_list
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Type[Dataset] , _SCREAMING_SNAKE_CASE : float = 0.85 )->Tuple[Type[Dataset], List[List[Dict]]]:
_lowerCAmelCase = make_duplicate_clusters(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_lowerCAmelCase = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster}
_lowerCAmelCase = {}
_lowerCAmelCase = find_extremes(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for extremes in extremes_clusters:
for element in extremes:
_lowerCAmelCase = element
_lowerCAmelCase = duplicate_indices - set(extreme_dict.keys() )
_lowerCAmelCase = dataset.filter(lambda x , idx : idx not in remove_indices , with_indices=True )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
_lowerCAmelCase = element['''base_index'''] in extreme_dict
if element["is_extreme"]:
_lowerCAmelCase = extreme_dict[element['''base_index''']]['''copies''']
print(f'''Original dataset size: {len(_SCREAMING_SNAKE_CASE )}''' )
print(f'''Number of duplicate clusters: {len(_SCREAMING_SNAKE_CASE )}''' )
print(f'''Files in duplicate cluster: {len(_SCREAMING_SNAKE_CASE )}''' )
print(f'''Unique files in duplicate cluster: {len(_SCREAMING_SNAKE_CASE )}''' )
print(f'''Filtered dataset size: {len(_SCREAMING_SNAKE_CASE )}''' )
return ds_filter, duplicate_clusters
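# Hedged usage sketch (assumptions: a `datasets.Dataset` with "content", "repo_name" and
# "path" columns, and that the final function above is the entry point — named
# `deduplicate_dataset` in the upstream CodeParrot script). Note that documents shorter
# than MIN_NUM_TOKENS tokens are skipped by the min-hash step.
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({
#       "content": [code_a, code_a, code_b],        # two near-duplicates, one unique file
#       "repo_name": ["repo1", "repo2", "repo3"],
#       "path": ["a.py", "b.py", "c.py"],
#   })
#   ds_dedup, clusters = deduplicate_dataset(ds, 0.85)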
| 664 | 0 |
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
UpperCAmelCase_ = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
UpperCAmelCase_ = requests.get(url, headers={"UserAgent": UserAgent().random})
# res.raise_for_status()
with open("project1a.html", "wb") as out_file: # only for knowing the class
for data in res.iter_content(1_0_0_0_0):
out_file.write(data)
UpperCAmelCase_ = BeautifulSoup(res.text, "html.parser")
UpperCAmelCase_ = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(F"""https://google.com{link.get('href')}""")
| 709 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class UpperCAmelCase ( snake_case_ ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = dataset
_lowerCAmelCase = process
_lowerCAmelCase = params
def __len__( self ):
return len(self.dataset )
def __getitem__( self , _lowerCAmelCase ):
_lowerCAmelCase = self.dataset[i]
_lowerCAmelCase = self.process(_lowerCAmelCase , **self.params )
return processed
class UpperCAmelCase ( snake_case_ ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None ):
_lowerCAmelCase = loader
_lowerCAmelCase = infer
_lowerCAmelCase = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
_lowerCAmelCase = None
_lowerCAmelCase = loader_batch_size
# Internal bookkeeping
_lowerCAmelCase = None
_lowerCAmelCase = None
def __len__( self ):
return len(self.loader )
def __iter__( self ):
_lowerCAmelCase = iter(self.loader )
return self
def __lowerCAmelCase ( self ):
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
_lowerCAmelCase = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
_lowerCAmelCase = {}
for k, element in self._loader_batch_data.items():
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
# Convert ModelOutput to tuple first
_lowerCAmelCase = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
_lowerCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
_lowerCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_lowerCAmelCase , _lowerCAmelCase ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
_lowerCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
_lowerCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
_lowerCAmelCase = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
# Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
_lowerCAmelCase = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
# Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
_lowerCAmelCase = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
_lowerCAmelCase = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
_lowerCAmelCase = self._loader_batch_data.__class__(_lowerCAmelCase )
self._loader_batch_index += 1
return result
def __lowerCAmelCase ( self ):
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
_lowerCAmelCase = next(self.iterator )
_lowerCAmelCase = self.infer(_lowerCAmelCase , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(_lowerCAmelCase , torch.Tensor ):
_lowerCAmelCase = processed
else:
_lowerCAmelCase = list(processed.keys() )[0]
_lowerCAmelCase = processed[key]
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = len(_lowerCAmelCase )
else:
_lowerCAmelCase = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_lowerCAmelCase = observed_batch_size
# Setting internal index to unwrap the batch
_lowerCAmelCase = processed
_lowerCAmelCase = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class UpperCAmelCase ( snake_case_ ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None ):
super().__init__(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def __iter__( self ):
_lowerCAmelCase = iter(self.loader )
_lowerCAmelCase = None
return self
def __lowerCAmelCase ( self ):
if self.subiterator is None:
_lowerCAmelCase = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
_lowerCAmelCase = next(self.subiterator )
except StopIteration:
# When a preprocess iterator ends, we can start looking at the next item
# ChunkIterator will keep feeding until ALL elements of iterator
# all have created their subiterator and have been iterating against.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
_lowerCAmelCase = self.infer(next(self.iterator ) , **self.params )
_lowerCAmelCase = next(self.subiterator )
return processed
class UpperCAmelCase ( snake_case_ ):
def __iter__( self ):
_lowerCAmelCase = iter(self.loader )
return self
def __lowerCAmelCase ( self ):
# Extremely similar to PipelineIterator in its unpacking mechanism
# BUT, we have an extra required item which is the presence of `is_last`
# That is because everything is flattened by `PipelineChunkIterator` we
# need to keep track of how to regroup here in the original `process`
# boundaries so that `process` and `postprocess` see the same data.
# This iterator accumulates items (possibly while unbatching) until it
# hits an `is_last` and then just passes it on to the caller.
_lowerCAmelCase = False
_lowerCAmelCase = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
_lowerCAmelCase = self.loader_batch_item()
_lowerCAmelCase = item.pop('''is_last''' )
accumulator.append(_lowerCAmelCase )
if is_last:
return accumulator
while not is_last:
_lowerCAmelCase = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(_lowerCAmelCase , torch.Tensor ):
_lowerCAmelCase = processed
else:
_lowerCAmelCase = list(processed.keys() )[0]
_lowerCAmelCase = processed[key]
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = len(_lowerCAmelCase )
else:
_lowerCAmelCase = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_lowerCAmelCase = observed_batch_size
_lowerCAmelCase = processed
_lowerCAmelCase = 0
while self._loader_batch_index < self.loader_batch_size:
_lowerCAmelCase = self.loader_batch_item()
_lowerCAmelCase = item.pop('''is_last''' )
accumulator.append(_lowerCAmelCase )
if is_last:
return accumulator
else:
_lowerCAmelCase = processed
_lowerCAmelCase = item.pop('''is_last''' )
accumulator.append(_lowerCAmelCase )
return accumulator
class UpperCAmelCase ( snake_case_ ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = dataset
_lowerCAmelCase = key
def __len__( self ):
return len(self.dataset )
def __getitem__( self , _lowerCAmelCase ):
return self.dataset[i][self.key]
class UpperCAmelCase ( snake_case_ ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = dataset
_lowerCAmelCase = keya
_lowerCAmelCase = keya
def __len__( self ):
return len(self.dataset )
def __getitem__( self , _lowerCAmelCase ):
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 664 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase ( a__ ):
SCREAMING_SNAKE_CASE__ = ["image_processor", "tokenizer"]
SCREAMING_SNAKE_CASE__ = "ChineseCLIPImageProcessor"
SCREAMING_SNAKE_CASE__ = ("BertTokenizer", "BertTokenizerFast")
def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , **_lowerCAmelCase ):
_lowerCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , lowerCamelCase_ , )
_lowerCAmelCase = kwargs.pop('''feature_extractor''' )
_lowerCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowerCamelCase_ , lowerCamelCase_ )
_lowerCAmelCase = self.image_processor
def __call__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , **_lowerCAmelCase ):
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
_lowerCAmelCase = self.tokenizer(lowerCamelCase_ , return_tensors=lowerCamelCase_ , **lowerCamelCase_ )
if images is not None:
_lowerCAmelCase = self.image_processor(lowerCamelCase_ , return_tensors=lowerCamelCase_ , **lowerCamelCase_ )
if text is not None and images is not None:
_lowerCAmelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCamelCase_ ) , tensor_type=lowerCamelCase_ )
def __lowerCAmelCase ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
return self.tokenizer.batch_decode(*lowerCamelCase_ , **lowerCamelCase_ )
def __lowerCAmelCase ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
return self.tokenizer.decode(*lowerCamelCase_ , **lowerCamelCase_ )
@property
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.tokenizer.model_input_names
_lowerCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __lowerCAmelCase ( self ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowerCamelCase_ , )
return self.image_processor_class
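# Hedged usage sketch (the Hub checkpoint name is an assumption; any PIL image works):
#
#   from transformers import ChineseCLIPProcessor
#   from PIL import Image
#
#   processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#   inputs = processor(text=["一只猫", "一只狗"], images=Image.open("cat.png"), return_tensors="pt")
#   # `inputs` carries the tokenizer fields plus `pixel_values`, as merged in __call__ above.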
| 710 |
import numpy
class UpperCAmelCase :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
_lowerCAmelCase = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
_lowerCAmelCase = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
_lowerCAmelCase = numpy.random.rand(3 , 1 )
# Real output values provided.
_lowerCAmelCase = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
_lowerCAmelCase = numpy.zeros(output_array.shape )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
_lowerCAmelCase = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
_lowerCAmelCase = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def __lowerCAmelCase ( self ):
_lowerCAmelCase = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
_lowerCAmelCase = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
_lowerCAmelCase = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
for iteration in range(1 , iterations + 1 ):
_lowerCAmelCase = self.feedforward()
self.back_propagation()
if give_loss:
_lowerCAmelCase = numpy.mean(numpy.square(output - self.feedforward() ) )
print(F'''Iteration {iteration} Loss: {loss}''' )
def __lowerCAmelCase ( self , _lowerCAmelCase ):
_lowerCAmelCase = input_arr
_lowerCAmelCase = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
_lowerCAmelCase = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
_lowerCAmelCase = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : numpy.ndarray )->numpy.ndarray:
return 1 / (1 + numpy.exp(-value ))
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : numpy.ndarray )->numpy.ndarray:
return (value) * (1 - (value))
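# Clarifying note (not in the original): sigmoid_derivative expects the already activated
# value s = sigmoid(x), since d/dx sigmoid(x) = s * (1 - s); the backpropagation code above
# therefore always passes layer outputs, never raw pre-activations.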
def UpperCAmelCase__ ( )->int:
_lowerCAmelCase = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
_lowerCAmelCase = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
_lowerCAmelCase = TwoHiddenLayerNeuralNetwork(
input_array=_SCREAMING_SNAKE_CASE , output_array=_SCREAMING_SNAKE_CASE )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=_SCREAMING_SNAKE_CASE , iterations=1_0 , give_loss=_SCREAMING_SNAKE_CASE )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
| 664 | 0 |
import heapq as hq
import math
from collections.abc import Iterator
class UpperCAmelCase :
def __init__( self , _lowerCAmelCase ):
_lowerCAmelCase = str(id_ )
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = []
_lowerCAmelCase = {} # {vertex:distance}
def __lt__( self , _lowerCAmelCase ):
return self.key < other.key
def __repr__( self ):
return self.id
def __lowerCAmelCase ( self , _lowerCAmelCase ):
self.neighbors.append(UpperCamelCase__ )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = weight
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Tuple )->Optional[Any]:
# add the neighbors:
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , _SCREAMING_SNAKE_CASE )
graph[b - 1].add_edge(graph[a - 1] , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : Vertex )->Optional[int]:
_lowerCAmelCase = []
for u in graph:
_lowerCAmelCase = math.inf
_lowerCAmelCase = None
_lowerCAmelCase = 0
_lowerCAmelCase = graph[:]
while q:
_lowerCAmelCase = min(_SCREAMING_SNAKE_CASE )
q.remove(_SCREAMING_SNAKE_CASE )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
_lowerCAmelCase = u
_lowerCAmelCase = u.edges[v.id]
for i in range(1 , len(_SCREAMING_SNAKE_CASE ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : Vertex )->int:
for u in graph:
_lowerCAmelCase = math.inf
_lowerCAmelCase = None
_lowerCAmelCase = 0
_lowerCAmelCase = list(_SCREAMING_SNAKE_CASE )
hq.heapify(_SCREAMING_SNAKE_CASE )
while h:
_lowerCAmelCase = hq.heappop(_SCREAMING_SNAKE_CASE )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
_lowerCAmelCase = u
_lowerCAmelCase = u.edges[v.id]
hq.heapify(_SCREAMING_SNAKE_CASE )
for i in range(1 , len(_SCREAMING_SNAKE_CASE ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def UpperCAmelCase__ ( )->Optional[int]:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
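# Complexity note (sketch): the first variant scans the remaining queue for the minimum-key
# vertex, giving O(V^2) overall; the heap variant as written re-runs hq.heapify (O(V)) after
# every successful relaxation and tests membership with `v in h` (also O(V)), so it is closer
# to O(E * V) — a heap with decrease-key would recover the textbook O(E log V).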
| 711 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase_ = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["LayoutXLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 664 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class UpperCAmelCase :
# setable values
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None # sigma(t_i)
@classmethod
def __lowerCAmelCase ( cls ):
return cls()
@dataclass
class UpperCAmelCase ( __UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ = 4_2
SCREAMING_SNAKE_CASE__ = 4_2
SCREAMING_SNAKE_CASE__ = 4_2
class UpperCAmelCase ( __UpperCAmelCase ,__UpperCAmelCase ):
@property
def __lowerCAmelCase ( self ):
return True
@register_to_config
def __init__( self , _lowerCAmelCase = 0.02 , _lowerCAmelCase = 100 , _lowerCAmelCase = 1.007 , _lowerCAmelCase = 80 , _lowerCAmelCase = 0.05 , _lowerCAmelCase = 50 , ):
pass
def __lowerCAmelCase ( self ):
return KarrasVeSchedulerState.create()
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = () ):
_lowerCAmelCase = jnp.arange(0 , lowerCAmelCase_ )[::-1].copy()
_lowerCAmelCase = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=lowerCAmelCase_ , schedule=jnp.array(lowerCAmelCase_ , dtype=jnp.floataa ) , timesteps=lowerCAmelCase_ , )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
if self.config.s_min <= sigma <= self.config.s_max:
_lowerCAmelCase = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
else:
_lowerCAmelCase = 0
# sample eps ~ N(0, S_noise^2 * I)
_lowerCAmelCase = random.split(lowerCAmelCase_ , num=1 )
_lowerCAmelCase = self.config.s_noise * random.normal(key=lowerCAmelCase_ , shape=sample.shape )
_lowerCAmelCase = sigma + gamma * sigma
_lowerCAmelCase = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
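# Clarifying note (not in the original): the two lines above are the "churn" step of
# Karras et al. (2022), Algorithm 2 — sigma_hat = sigma * (1 + gamma) and
# x_hat = x + sqrt(sigma_hat**2 - sigma**2) * eps, with eps ~ N(0, s_noise**2 * I).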
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = True , ):
_lowerCAmelCase = sample_hat + sigma_hat * model_output
_lowerCAmelCase = (sample_hat - pred_original_sample) / sigma_hat
_lowerCAmelCase = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=lowerCAmelCase_ , derivative=lowerCAmelCase_ , state=lowerCAmelCase_ )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = True , ):
_lowerCAmelCase = sample_prev + sigma_prev * model_output
_lowerCAmelCase = (sample_prev - pred_original_sample) / sigma_prev
_lowerCAmelCase = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=lowerCAmelCase_ , derivative=lowerCAmelCase_ , state=lowerCAmelCase_ )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
raise NotImplementedError()
| 712 |
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def UpperCAmelCase__ ( *_SCREAMING_SNAKE_CASE : Tuple )->List[Any]:
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = list(_SCREAMING_SNAKE_CASE )
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
_lowerCAmelCase = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Exception )->bool:
_lowerCAmelCase = [
'''CUDA out of memory.''', # CUDA OOM
'''cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.''', # CUDNN SNAFU
'''DefaultCPUAllocator: can\'t allocate memory''', # CPU OOM
]
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(exception.args ) == 1:
return any(err in exception.args[0] for err in _statements )
return False
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : callable = None , _SCREAMING_SNAKE_CASE : int = 1_2_8 )->Optional[int]:
if function is None:
return functools.partial(_SCREAMING_SNAKE_CASE , starting_batch_size=_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = starting_batch_size
def decorator(*_SCREAMING_SNAKE_CASE : Optional[int] , **_SCREAMING_SNAKE_CASE : Optional[Any] ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
_lowerCAmelCase = list(inspect.signature(_SCREAMING_SNAKE_CASE ).parameters.keys() )
# Guard against user error
if len(_SCREAMING_SNAKE_CASE ) < (len(_SCREAMING_SNAKE_CASE ) + 1):
_lowerCAmelCase = ''', '''.join([f'''{arg}={value}''' for arg, value in zip(params[1:] , args[1:] )] )
raise TypeError(
f'''Batch size was passed into `{function.__name__}` as the first argument when called.'''
f'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''' )
while True:
if batch_size == 0:
raise RuntimeError('''No executable batch size found, reached zero.''' )
try:
return function(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
except Exception as e:
if should_reduce_batch_size(_SCREAMING_SNAKE_CASE ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator
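# Hedged usage sketch (mirrors the documented accelerate pattern; upstream the decorator
# above is named `find_executable_batch_size`):
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def training_loop(batch_size):
#       ...  # build the dataloaders/model with `batch_size`
#
#   training_loop()  # the batch size is halved on every OOM until the loop fits in memory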
| 664 | 0 |
from __future__ import annotations
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : list )->list:
if len(a_ ) == 0:
return []
_lowerCAmelCase , _lowerCAmelCase = min(a_ ), max(a_ )
_lowerCAmelCase = int(max_value - min_value ) + 1
_lowerCAmelCase = [[] for _ in range(a_ )]
for i in my_list:
buckets[int(i - min_value )].append(i )
return [v for bucket in buckets for v in sorted(bucket )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
| 713 |
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=2 , _lowerCAmelCase=8 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=99 , _lowerCAmelCase=16 , _lowerCAmelCase=5 , _lowerCAmelCase=2 , _lowerCAmelCase=36 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=512 , _lowerCAmelCase=16 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , ):
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = seq_length
_lowerCAmelCase = is_training
_lowerCAmelCase = use_input_mask
_lowerCAmelCase = use_token_type_ids
_lowerCAmelCase = use_labels
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = type_vocab_size
_lowerCAmelCase = type_sequence_label_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = num_labels
_lowerCAmelCase = num_choices
_lowerCAmelCase = scope
def __lowerCAmelCase ( self ):
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase = None
if self.use_input_mask:
_lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase = None
if self.use_token_type_ids:
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
if self.use_labels:
_lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self ):
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.get_config()
_lowerCAmelCase = 300
return config
def __lowerCAmelCase ( self ):
( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) = self.prepare_config_and_inputs()
_lowerCAmelCase = True
_lowerCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = MraModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
_lowerCAmelCase = model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
_lowerCAmelCase = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
_lowerCAmelCase = True
_lowerCAmelCase = MraModel(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , )
_lowerCAmelCase = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , )
_lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = MraForMaskedLM(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = MraForQuestionAnswering(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , start_positions=_lowerCAmelCase , end_positions=_lowerCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = MraForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = MraForTokenClassification(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = self.num_choices
_lowerCAmelCase = MraForMultipleChoice(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.prepare_config_and_inputs()
        (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase) = config_and_inputs
_lowerCAmelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( snake_case_ ,unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = ()
def __lowerCAmelCase ( self ):
_lowerCAmelCase = MraModelTester(self )
_lowerCAmelCase = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=37 )
def __lowerCAmelCase ( self ):
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowerCAmelCase = type
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowerCAmelCase )
@slow
def __lowerCAmelCase ( self ):
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase = MraModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@unittest.skip(reason='''MRA does not output attentions''' )
def __lowerCAmelCase ( self ):
return
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self ):
_lowerCAmelCase = MraModel.from_pretrained('''uw-madison/mra-base-512-4''' )
_lowerCAmelCase = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
_lowerCAmelCase = model(_lowerCAmelCase )[0]
_lowerCAmelCase = torch.Size((1, 256, 768) )
self.assertEqual(output.shape , _lowerCAmelCase )
_lowerCAmelCase = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCAmelCase , atol=1E-4 ) )
@slow
def __lowerCAmelCase ( self ):
_lowerCAmelCase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''' )
_lowerCAmelCase = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
_lowerCAmelCase = model(_lowerCAmelCase )[0]
_lowerCAmelCase = 50_265
_lowerCAmelCase = torch.Size((1, 256, vocab_size) )
self.assertEqual(output.shape , _lowerCAmelCase )
_lowerCAmelCase = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCAmelCase , atol=1E-4 ) )
@slow
def __lowerCAmelCase ( self ):
_lowerCAmelCase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''' )
_lowerCAmelCase = torch.arange(4_096 ).unsqueeze(0 )
with torch.no_grad():
_lowerCAmelCase = model(_lowerCAmelCase )[0]
_lowerCAmelCase = 50_265
_lowerCAmelCase = torch.Size((1, 4_096, vocab_size) )
self.assertEqual(output.shape , _lowerCAmelCase )
_lowerCAmelCase = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCAmelCase , atol=1E-4 ) )
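# Illustrative note: the integration tests above pin a 3x3 corner of the output
# (output[:, :3, :3]) against hard-coded values with torch.allclose(atol=1e-4);
# comparing a small slice keeps the check meaningful while tolerating minor
# numerical differences across hardware.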
| 664 | 0 |
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCAmelCase ( unittest.TestCase ):
@property
def __lowerCAmelCase ( self ):
torch.manual_seed(0 )
_lowerCAmelCase = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.dummy_uncond_unet
_lowerCAmelCase = ScoreSdeVeScheduler()
_lowerCAmelCase = ScoreSdeVePipeline(unet=_A , scheduler=_A )
sde_ve.to(_A )
sde_ve.set_progress_bar_config(disable=_A )
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=_A ).images
_lowerCAmelCase = torch.manual_seed(0 )
        _lowerCAmelCase = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=_A , return_dict=_A )[0]
_lowerCAmelCase = image[0, -3:, -3:, -1]
_lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_lowerCAmelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
def __lowerCAmelCase ( self ):
_lowerCAmelCase = '''google/ncsnpp-church-256'''
_lowerCAmelCase = UNetaDModel.from_pretrained(_A )
_lowerCAmelCase = ScoreSdeVeScheduler.from_pretrained(_A )
_lowerCAmelCase = ScoreSdeVePipeline(unet=_A , scheduler=_A )
sde_ve.to(_A )
sde_ve.set_progress_bar_config(disable=_A )
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = sde_ve(num_inference_steps=10 , output_type='''numpy''' , generator=_A ).images
_lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_lowerCAmelCase = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
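# Illustrative note: each test reseeds with torch.manual_seed(0) before sampling,
# so the stochastic SDE sampler is reproducible and the hard-coded expected_slice
# comparison (max abs diff < 1e-2) remains meaningful across runs.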
| 714 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
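# Illustrative usage (the checkpoint name is an assumption, not taken from this file):
#     pipe = ShapEPipeline.from_pretrained("openai/shap-e")
#     images = pipe("a shark", guidance_scale=15.0, num_inference_steps=64).images
# When torch/transformers are unavailable, the dummy ShapEPipeline imported above
# raises a descriptive error on first use instead of failing at import time.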
| 664 | 0 |
import argparse
import os
import re
import packaging.version
UpperCAmelCase_ = "examples/"
UpperCAmelCase_ = {
"examples": (re.compile(R"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(R"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(R"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), R"\1version=\"VERSION\","),
"doc": (re.compile(R"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
UpperCAmelCase_ = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
UpperCAmelCase_ = "README.md"
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Union[str, Any] )->str:
with open(UpperCAmelCase__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
_lowerCAmelCase = f.read()
_lowerCAmelCase , _lowerCAmelCase = REPLACE_PATTERNS[pattern]
_lowerCAmelCase = replace.replace('''VERSION''' , UpperCAmelCase__ )
_lowerCAmelCase = re_pattern.sub(UpperCAmelCase__ , UpperCAmelCase__ )
with open(UpperCAmelCase__ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(UpperCAmelCase__ )
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Tuple )->List[Any]:
for folder, directories, fnames in os.walk(UpperCAmelCase__ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) , UpperCAmelCase__ , pattern='''examples''' )
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : List[Any]=False )->List[str]:
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
if not patch:
update_version_in_examples(UpperCAmelCase__ )
def UpperCAmelCase__ ( )->int:
_lowerCAmelCase = '''🤗 Transformers currently provides the following architectures'''
_lowerCAmelCase = '''1. Want to contribute a new model?'''
with open(UpperCAmelCase__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
_lowerCAmelCase = f.readlines()
# Find the start of the list.
_lowerCAmelCase = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
_lowerCAmelCase = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
_lowerCAmelCase = lines[index].replace(
'''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , )
index += 1
with open(UpperCAmelCase__ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(UpperCAmelCase__ )
def UpperCAmelCase__ ( )->Union[str, Any]:
with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
_lowerCAmelCase = f.read()
_lowerCAmelCase = REPLACE_PATTERNS['''init'''][0].search(UpperCAmelCase__ ).groups()[0]
return packaging.version.parse(UpperCAmelCase__ )
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict=False )->Tuple:
_lowerCAmelCase = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
_lowerCAmelCase = default_version.base_version
elif patch:
_lowerCAmelCase = f'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
_lowerCAmelCase = f'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
_lowerCAmelCase = input(f'''Which version are you releasing? [{default_version}]''' )
if len(UpperCAmelCase__ ) == 0:
_lowerCAmelCase = default_version
print(f'''Updating version to {version}.''' )
global_version_update(UpperCAmelCase__ , patch=UpperCAmelCase__ )
if not patch:
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
def UpperCAmelCase__ ( )->Dict:
_lowerCAmelCase = get_version()
_lowerCAmelCase = f'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
_lowerCAmelCase = current_version.base_version
# Check with the user we got that right.
_lowerCAmelCase = input(f'''Which version are we developing now? [{dev_version}]''' )
if len(UpperCAmelCase__ ) == 0:
_lowerCAmelCase = dev_version
print(f'''Updating version to {version}.''' )
global_version_update(UpperCAmelCase__ )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
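def _demo_version_rewrite():
    """Minimal sketch (illustrative only, not used by the release flow): shows how a
    REPLACE_PATTERNS entry rewrites a version string in place."""
    re_pattern, replace = REPLACE_PATTERNS["init"]
    demo = '__version__ = "4.30.0.dev0"\n'
    # -> '__version__ = "4.30.0"\n'
    return re_pattern.sub(replace.replace("VERSION", "4.30.0"), demo)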
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
UpperCAmelCase_ = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
| 715 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class UpperCAmelCase ( unittest.TestCase ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=7 , _lowerCAmelCase=3 , _lowerCAmelCase=10 , _lowerCAmelCase=18 , _lowerCAmelCase=30 , _lowerCAmelCase=400 , _lowerCAmelCase=True , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase=[0.5, 0.5, 0.5] , _lowerCAmelCase=[0.5, 0.5, 0.5] , _lowerCAmelCase=None , ):
_lowerCAmelCase = size if size is not None else {'''shortest_edge''': 18}
_lowerCAmelCase = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = num_frames
_lowerCAmelCase = image_size
_lowerCAmelCase = min_resolution
_lowerCAmelCase = max_resolution
_lowerCAmelCase = do_resize
_lowerCAmelCase = size
_lowerCAmelCase = do_normalize
_lowerCAmelCase = image_mean
_lowerCAmelCase = image_std
_lowerCAmelCase = crop_size
def __lowerCAmelCase ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class UpperCAmelCase ( snake_case_ ,unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = VivitImageProcessor if is_vision_available() else None
def __lowerCAmelCase ( self ):
_lowerCAmelCase = VivitImageProcessingTester(self )
@property
def __lowerCAmelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(_lowerCAmelCase , '''image_std''' ) )
self.assertTrue(hasattr(_lowerCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(_lowerCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(_lowerCAmelCase , '''do_center_crop''' ) )
self.assertTrue(hasattr(_lowerCAmelCase , '''size''' ) )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
_lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def __lowerCAmelCase ( self ):
# Initialize image_processing
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL videos
_lowerCAmelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase )
for video in video_inputs:
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
self.assertIsInstance(video[0] , Image.Image )
# Test not batched input
_lowerCAmelCase = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_lowerCAmelCase = image_processing(_lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __lowerCAmelCase ( self ):
# Initialize image_processing
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCAmelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , numpify=_lowerCAmelCase )
for video in video_inputs:
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
self.assertIsInstance(video[0] , np.ndarray )
# Test not batched input
_lowerCAmelCase = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_lowerCAmelCase = image_processing(_lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __lowerCAmelCase ( self ):
# Initialize image_processing
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCAmelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase )
for video in video_inputs:
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
self.assertIsInstance(video[0] , torch.Tensor )
# Test not batched input
_lowerCAmelCase = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_lowerCAmelCase = image_processing(_lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
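# Illustrative note: every shape assertion above checks the 5-D video layout
# (batch_size, num_frames, num_channels, height, width); with the defaults here
# the batched case is torch.Size([7, 10, 3, 18, 18]).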
| 664 | 0 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv('''TEST_SAGEMAKER''' ,'''False''' ) ) is not True ,reason='''Skipping test because it should only be run when releasing a minor transformers version''' ,)
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
        '''results''': {'''train_runtime''': 650, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_ddp.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
        '''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf_dist.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
        '''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.6, '''eval_loss''': 0.7},
},
] )
class UpperCAmelCase ( unittest.TestCase ):
def __lowerCAmelCase ( self ):
if self.framework == "pytorch":
subprocess.run(
F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding='''utf-8''' , check=_lowerCAmelCase , )
assert hasattr(self , '''env''' )
def __lowerCAmelCase ( self , _lowerCAmelCase ):
_lowerCAmelCase = F'''{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}'''
# distributed data settings
_lowerCAmelCase = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=_lowerCAmelCase , instance_count=_lowerCAmelCase , instance_type=self.instance_type , debugger_hook_config=_lowerCAmelCase , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=_lowerCAmelCase , py_version='''py36''' , )
def __lowerCAmelCase ( self , _lowerCAmelCase ):
TrainingJobAnalytics(_lowerCAmelCase ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' )
@parameterized.expand([(2,)] )
def __lowerCAmelCase ( self , _lowerCAmelCase ):
_lowerCAmelCase = self.create_estimator(_lowerCAmelCase )
# run training
estimator.fit()
# result dataframe
_lowerCAmelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
_lowerCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
_lowerCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_lowerCAmelCase = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999_999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F'''{estimator.latest_training_job.name}.json''' , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , _lowerCAmelCase )
| 716 |
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
UpperCAmelCase_ = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
UpperCAmelCase_ = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
UpperCAmelCase_ = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[Any] )->Optional[Any]:
def remove_articles(_SCREAMING_SNAKE_CASE : List[str] ):
_lowerCAmelCase = re.compile(r'''\b(a|an|the)\b''' , re.UNICODE )
return re.sub(_SCREAMING_SNAKE_CASE , ''' ''' , _SCREAMING_SNAKE_CASE )
def white_space_fix(_SCREAMING_SNAKE_CASE : List[Any] ):
return " ".join(text.split() )
def remove_punc(_SCREAMING_SNAKE_CASE : Optional[Any] ):
_lowerCAmelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_SCREAMING_SNAKE_CASE : Optional[int] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_SCREAMING_SNAKE_CASE ) ) ) )
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[Any] )->Any:
return int(normalize_answer(_SCREAMING_SNAKE_CASE ) == normalize_answer(_SCREAMING_SNAKE_CASE ) )
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : str )->int:
_lowerCAmelCase = [any(compute_exact(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for ref in refs ) for pred, refs in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )]
    return (sum(_SCREAMING_SNAKE_CASE ) / len(_SCREAMING_SNAKE_CASE )) * 100
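# e.g. the normalizer above maps "The Cat, sat!" -> "cat sat", so the exact-match
# score ignores case, punctuation, English articles and extra whitespace.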
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[str] )->Optional[int]:
_lowerCAmelCase = [rgram for rgrams in rgramslist for rgram in rgrams]
_lowerCAmelCase = Counter(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = Counter(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = Counter()
for sgram, scount in sgramcounter.items():
_lowerCAmelCase = scount * numref
_lowerCAmelCase = Counter(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = Counter()
for cgram, ccount in cgramcounter.items():
_lowerCAmelCase = ccount * numref
# KEEP
_lowerCAmelCase = sgramcounter_rep & cgramcounter_rep
_lowerCAmelCase = keepgramcounter_rep & rgramcounter
_lowerCAmelCase = sgramcounter_rep & rgramcounter
_lowerCAmelCase = 0
_lowerCAmelCase = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_lowerCAmelCase = 1
_lowerCAmelCase = 1
if len(_SCREAMING_SNAKE_CASE ) > 0:
_lowerCAmelCase = keeptmpscorea / len(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
_lowerCAmelCase = keeptmpscorea / sum(keepgramcounterall_rep.values() )
_lowerCAmelCase = 0
if keepscore_precision > 0 or keepscore_recall > 0:
_lowerCAmelCase = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
_lowerCAmelCase = sgramcounter_rep - cgramcounter_rep
_lowerCAmelCase = delgramcounter_rep - rgramcounter
_lowerCAmelCase = sgramcounter_rep - rgramcounter
_lowerCAmelCase = 0
_lowerCAmelCase = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_lowerCAmelCase = 1
if len(_SCREAMING_SNAKE_CASE ) > 0:
_lowerCAmelCase = deltmpscorea / len(_SCREAMING_SNAKE_CASE )
# ADDITION
_lowerCAmelCase = set(_SCREAMING_SNAKE_CASE ) - set(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = set(_SCREAMING_SNAKE_CASE ) & set(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = set(_SCREAMING_SNAKE_CASE ) - set(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_lowerCAmelCase = 1
_lowerCAmelCase = 1
if len(_SCREAMING_SNAKE_CASE ) > 0:
_lowerCAmelCase = addtmpscore / len(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
_lowerCAmelCase = addtmpscore / len(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = 0
if addscore_precision > 0 or addscore_recall > 0:
_lowerCAmelCase = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : str )->List[Any]:
_lowerCAmelCase = len(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = ssent.split(''' ''' )
_lowerCAmelCase = csent.split(''' ''' )
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
for rsent in rsents:
_lowerCAmelCase = rsent.split(''' ''' )
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
ragramslist.append(_SCREAMING_SNAKE_CASE )
for i in range(0 , len(_SCREAMING_SNAKE_CASE ) - 1 ):
if i < len(_SCREAMING_SNAKE_CASE ) - 1:
_lowerCAmelCase = ragrams[i] + ''' ''' + ragrams[i + 1]
ragrams.append(_SCREAMING_SNAKE_CASE )
if i < len(_SCREAMING_SNAKE_CASE ) - 2:
_lowerCAmelCase = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2]
ragrams.append(_SCREAMING_SNAKE_CASE )
if i < len(_SCREAMING_SNAKE_CASE ) - 3:
_lowerCAmelCase = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] + ''' ''' + ragrams[i + 3]
ragrams.append(_SCREAMING_SNAKE_CASE )
ragramslist.append(_SCREAMING_SNAKE_CASE )
ragramslist.append(_SCREAMING_SNAKE_CASE )
ragramslist.append(_SCREAMING_SNAKE_CASE )
for i in range(0 , len(_SCREAMING_SNAKE_CASE ) - 1 ):
if i < len(_SCREAMING_SNAKE_CASE ) - 1:
_lowerCAmelCase = sagrams[i] + ''' ''' + sagrams[i + 1]
sagrams.append(_SCREAMING_SNAKE_CASE )
if i < len(_SCREAMING_SNAKE_CASE ) - 2:
_lowerCAmelCase = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2]
sagrams.append(_SCREAMING_SNAKE_CASE )
if i < len(_SCREAMING_SNAKE_CASE ) - 3:
_lowerCAmelCase = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] + ''' ''' + sagrams[i + 3]
sagrams.append(_SCREAMING_SNAKE_CASE )
for i in range(0 , len(_SCREAMING_SNAKE_CASE ) - 1 ):
if i < len(_SCREAMING_SNAKE_CASE ) - 1:
_lowerCAmelCase = cagrams[i] + ''' ''' + cagrams[i + 1]
cagrams.append(_SCREAMING_SNAKE_CASE )
if i < len(_SCREAMING_SNAKE_CASE ) - 2:
_lowerCAmelCase = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2]
cagrams.append(_SCREAMING_SNAKE_CASE )
if i < len(_SCREAMING_SNAKE_CASE ) - 3:
_lowerCAmelCase = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] + ''' ''' + cagrams[i + 3]
cagrams.append(_SCREAMING_SNAKE_CASE )
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = SARIngram(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = SARIngram(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = SARIngram(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = SARIngram(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_lowerCAmelCase = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
_lowerCAmelCase = sum([delascore, delascore, delascore, delascore] ) / 4
_lowerCAmelCase = sum([addascore, addascore, addascore, addascore] ) / 4
_lowerCAmelCase = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : bool = True , _SCREAMING_SNAKE_CASE : str = "13a" , _SCREAMING_SNAKE_CASE : bool = True )->int:
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though the Wiki-Auto and TURK datasets
    # do not require normalization, we do it for consistency.
# Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
# [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
if lowercase:
_lowerCAmelCase = sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
_lowerCAmelCase = sacrebleu.metrics.bleu._get_tokenizer(_SCREAMING_SNAKE_CASE )()(_SCREAMING_SNAKE_CASE )
else:
_lowerCAmelCase = sacrebleu.TOKENIZERS[tokenizer]()(_SCREAMING_SNAKE_CASE )
elif tokenizer == "moses":
_lowerCAmelCase = sacremoses.MosesTokenizer().tokenize(_SCREAMING_SNAKE_CASE , return_str=_SCREAMING_SNAKE_CASE , escape=_SCREAMING_SNAKE_CASE )
elif tokenizer == "penn":
_lowerCAmelCase = sacremoses.MosesTokenizer().penn_tokenize(_SCREAMING_SNAKE_CASE , return_str=_SCREAMING_SNAKE_CASE )
else:
_lowerCAmelCase = sentence
if not return_str:
_lowerCAmelCase = normalized_sent.split()
return normalized_sent
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[str] )->str:
if not (len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE )):
raise ValueError('''Sources length must match predictions and references lengths.''' )
_lowerCAmelCase = 0
for src, pred, refs in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
sari_score += SARIsent(normalize(_SCREAMING_SNAKE_CASE ) , normalize(_SCREAMING_SNAKE_CASE ) , [normalize(_SCREAMING_SNAKE_CASE ) for sent in refs] )
_lowerCAmelCase = sari_score / len(_SCREAMING_SNAKE_CASE )
    return 100 * sari_score
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Optional[Any]="exp" , _SCREAMING_SNAKE_CASE : Optional[int]=None , _SCREAMING_SNAKE_CASE : Optional[int]=False , _SCREAMING_SNAKE_CASE : str=False , _SCREAMING_SNAKE_CASE : int=False , )->str:
_lowerCAmelCase = len(references[0] )
if any(len(_SCREAMING_SNAKE_CASE ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
_lowerCAmelCase = [[refs[i] for refs in references] for i in range(_SCREAMING_SNAKE_CASE )]
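    # e.g. references=[["r1a", "r1b"], ["r2a", "r2b"]] (refs per prediction) becomes
    # [["r1a", "r2a"], ["r1b", "r2b"]] (parallel reference streams), the layout
    # sacrebleu.corpus_bleu expects.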
_lowerCAmelCase = sacrebleu.corpus_bleu(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , smooth_method=_SCREAMING_SNAKE_CASE , smooth_value=_SCREAMING_SNAKE_CASE , force=_SCREAMING_SNAKE_CASE , lowercase=_SCREAMING_SNAKE_CASE , use_effective_order=_SCREAMING_SNAKE_CASE , )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class UpperCAmelCase ( datasets.Metric ):
def __lowerCAmelCase ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=[
'''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
'''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
'''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
'''https://github.com/mjpost/sacreBLEU''',
] , reference_urls=[
'''https://www.aclweb.org/anthology/Q16-1029.pdf''',
'''https://github.com/mjpost/sacreBLEU''',
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = {}
result.update({'''sari''': compute_sari(sources=_lowerCAmelCase , predictions=_lowerCAmelCase , references=_lowerCAmelCase )} )
result.update({'''sacrebleu''': compute_sacrebleu(predictions=_lowerCAmelCase , references=_lowerCAmelCase )} )
result.update({'''exact''': compute_em(predictions=_lowerCAmelCase , references=_lowerCAmelCase )} )
return result
| 664 | 0 |
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Tuple="attention" )->str:
_lowerCAmelCase = params[f'''{prefix}/layers_{i}/{layer_name}/key/kernel''']
_lowerCAmelCase = params[f'''{prefix}/layers_{i}/{layer_name}/out/kernel''']
_lowerCAmelCase = params[f'''{prefix}/layers_{i}/{layer_name}/query/kernel''']
_lowerCAmelCase = params[f'''{prefix}/layers_{i}/{layer_name}/value/kernel''']
return k, o, q, v
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[int]=False )->Tuple:
if split_mlp_wi:
_lowerCAmelCase = params[f'''{prefix}/layers_{i}/mlp/wi_0/kernel''']
_lowerCAmelCase = params[f'''{prefix}/layers_{i}/mlp/wi_1/kernel''']
_lowerCAmelCase = (wi_a, wi_a)
else:
_lowerCAmelCase = params[f'''{prefix}/layers_{i}/mlp/wi/kernel''']
_lowerCAmelCase = params[f'''{prefix}/layers_{i}/mlp/wo/kernel''']
return wi, wo
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] )->List[str]:
return params[f'''{prefix}/layers_{i}/{layer_name}/scale''']
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[Any] , *, _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Optional[int] )->List[str]:
_lowerCAmelCase = traverse_util.flatten_dict(variables['''target'''] )
_lowerCAmelCase = {'''/'''.join(_SCREAMING_SNAKE_CASE ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
_lowerCAmelCase = '''encoder/layers_0/mlp/wi_0/kernel''' in old
print('''Split MLP:''' , _SCREAMING_SNAKE_CASE )
_lowerCAmelCase = collections.OrderedDict()
# Shared embeddings.
_lowerCAmelCase = old['''token_embedder/embedding''']
# Encoder.
for i in range(_SCREAMING_SNAKE_CASE ):
# Block i, layer 0 (Self Attention).
_lowerCAmelCase = tax_layer_norm_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , '''encoder''' , '''pre_attention_layer_norm''' )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = tax_attention_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , '''encoder''' , '''attention''' )
_lowerCAmelCase = layer_norm
_lowerCAmelCase = k.T
_lowerCAmelCase = o.T
_lowerCAmelCase = q.T
_lowerCAmelCase = v.T
# Block i, layer 1 (MLP).
_lowerCAmelCase = tax_layer_norm_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , '''encoder''' , '''pre_mlp_layer_norm''' )
_lowerCAmelCase , _lowerCAmelCase = tax_mlp_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , '''encoder''' , _SCREAMING_SNAKE_CASE )
_lowerCAmelCase = layer_norm
if split_mlp_wi:
_lowerCAmelCase = wi[0].T
_lowerCAmelCase = wi[1].T
else:
_lowerCAmelCase = wi.T
_lowerCAmelCase = wo.T
_lowerCAmelCase = old[
'''encoder/relpos_bias/rel_embedding'''
].T
_lowerCAmelCase = old['''encoder/encoder_norm/scale''']
if not is_encoder_only:
# Decoder.
for i in range(_SCREAMING_SNAKE_CASE ):
# Block i, layer 0 (Self Attention).
_lowerCAmelCase = tax_layer_norm_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , '''decoder''' , '''pre_self_attention_layer_norm''' )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = tax_attention_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , '''decoder''' , '''self_attention''' )
_lowerCAmelCase = layer_norm
_lowerCAmelCase = k.T
_lowerCAmelCase = o.T
_lowerCAmelCase = q.T
_lowerCAmelCase = v.T
# Block i, layer 1 (Cross Attention).
_lowerCAmelCase = tax_layer_norm_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , '''decoder''' , '''pre_cross_attention_layer_norm''' )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = tax_attention_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , '''decoder''' , '''encoder_decoder_attention''' )
_lowerCAmelCase = layer_norm
_lowerCAmelCase = k.T
_lowerCAmelCase = o.T
_lowerCAmelCase = q.T
_lowerCAmelCase = v.T
# Block i, layer 2 (MLP).
_lowerCAmelCase = tax_layer_norm_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , '''decoder''' , '''pre_mlp_layer_norm''' )
_lowerCAmelCase , _lowerCAmelCase = tax_mlp_lookup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , '''decoder''' , _SCREAMING_SNAKE_CASE )
_lowerCAmelCase = layer_norm
if split_mlp_wi:
_lowerCAmelCase = wi[0].T
_lowerCAmelCase = wi[1].T
else:
_lowerCAmelCase = wi.T
_lowerCAmelCase = wo.T
_lowerCAmelCase = old['''decoder/decoder_norm/scale''']
_lowerCAmelCase = old[
'''decoder/relpos_bias/rel_embedding'''
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
_lowerCAmelCase = old['''decoder/logits_dense/kernel'''].T
return new
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[Any] )->Union[str, Any]:
_lowerCAmelCase = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
_lowerCAmelCase = state_dict['''shared.weight''']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
_lowerCAmelCase = state_dict['''shared.weight''']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('''Using shared word embeddings as lm_head.''' )
_lowerCAmelCase = state_dict['''shared.weight''']
return state_dict
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : str )->int:
_lowerCAmelCase = checkpoints.load_tax_checkpoint(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = convert_tax_to_pytorch(_SCREAMING_SNAKE_CASE , num_layers=config.num_layers , is_encoder_only=_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = make_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Dict = False )->List[str]:
_lowerCAmelCase = TaConfig.from_json_file(_SCREAMING_SNAKE_CASE )
print(f'''Building PyTorch model from configuration: {config}''' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
_lowerCAmelCase = TaEncoderModel(_SCREAMING_SNAKE_CASE )
else:
_lowerCAmelCase = TaForConditionalGeneration(_SCREAMING_SNAKE_CASE )
# Load weights from tf checkpoint
load_tax_weights_in_ta(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
# Verify that we can load the checkpoint.
model.from_pretrained(_SCREAMING_SNAKE_CASE )
print('''Done''' )
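# Background note (an assumption about the frameworks, not stated in this script):
# Flax stores dense kernels as (in_features, out_features) while
# torch.nn.Linear.weight is (out_features, in_features), which is why every
# kernel above is transposed with .T before entering the PyTorch state dict.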
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
)
UpperCAmelCase_ = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
| 717 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase_ = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["DeiTFeatureExtractor"]
UpperCAmelCase_ = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DeiTForImageClassification",
"DeiTForImageClassificationWithTeacher",
"DeiTForMaskedImageModeling",
"DeiTModel",
"DeiTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDeiTForImageClassification",
"TFDeiTForImageClassificationWithTeacher",
"TFDeiTForMaskedImageModeling",
"TFDeiTModel",
"TFDeiTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
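# _LazyModule defers the heavy torch/tf imports until an attribute such as
# DeiTModel is first accessed; the TYPE_CHECKING branch above only exists so
# static type checkers and IDEs still resolve the real symbols.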
| 664 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase_ = {
"configuration_rag": ["RagConfig"],
"retrieval_rag": ["RagRetriever"],
"tokenization_rag": ["RagTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"RagModel",
"RagPreTrainedModel",
"RagSequenceForGeneration",
"RagTokenForGeneration",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"TFRagModel",
"TFRagPreTrainedModel",
"TFRagSequenceForGeneration",
"TFRagTokenForGeneration",
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 718 |
def compute_ap(l):  # noqa: E741
    n = len(l)
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = dfs(i, i, -1, 0)
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
UpperCAmelCase_ = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
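# For the adjacency list above this should print 2, 3 and 5: removing 2 separates
# the triangle {0, 1} from the rest, removing 3 isolates 4, and removing 5 cuts
# off the cycle {6, 7, 8}.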
| 664 | 0 |
from sklearn.metrics import recall_score
import datasets
UpperCAmelCase_ = '\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n'
UpperCAmelCase_ = '\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {\'recall\': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {\'recall\': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {\'recall\': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'recall\': array([1., 0., 0.])}\n'
UpperCAmelCase_ = '\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class UpperCAmelCase ( datasets.Metric ):
def __lowerCAmelCase ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'''] , )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=1 , _lowerCAmelCase="binary" , _lowerCAmelCase=None , _lowerCAmelCase="warn" , ):
_lowerCAmelCase = recall_score(
_lowerCAmelCase , _lowerCAmelCase , labels=_lowerCAmelCase , pos_label=_lowerCAmelCase , average=_lowerCAmelCase , sample_weight=_lowerCAmelCase , zero_division=_lowerCAmelCase , )
return {"recall": float(_lowerCAmelCase ) if score.size == 1 else score}
| 719 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest( TestCase ):
    def setUp( self ):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = '''pt'''
        self.framework_tf = '''tf'''
    def _setup_pt_ckpt( self , model_path ):
        model_pt = AutoModel.from_pretrained(self.test_model )
        model_pt.save_pretrained(model_path )
    def _setup_tf_ckpt( self , model_path ):
        model_tf = TFAutoModel.from_pretrained(self.test_model , from_pt=True )
        model_tf.save_pretrained(model_path )
    def test_framework_provided( self ):
        mock_framework = '''mock_framework'''
        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model , mock_framework )
        self.assertEqual(framework , mock_framework )
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            framework = FeaturesManager.determine_framework(local_pt_ckpt , mock_framework )
            self.assertEqual(framework , mock_framework )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            framework = FeaturesManager.determine_framework(local_tf_ckpt , mock_framework )
            self.assertEqual(framework , mock_framework )
    def test_checkpoint_provided( self ):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            framework = FeaturesManager.determine_framework(local_pt_ckpt )
            self.assertEqual(framework , self.framework_pt )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            framework = FeaturesManager.determine_framework(local_tf_ckpt )
            self.assertEqual(framework , self.framework_tf )
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError ):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt )
    def test_from_environment( self ):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False )
        with patch('''transformers.onnx.features.is_tf_available''' , mock_tf_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_pt )
        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False )
        with patch('''transformers.onnx.features.is_torch_available''' , mock_torch_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_tf )
        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True )
        mock_torch_available = MagicMock(return_value=True )
        with patch('''transformers.onnx.features.is_tf_available''' , mock_tf_available ), patch(
            '''transformers.onnx.features.is_torch_available''' , mock_torch_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_pt )
        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False )
        mock_torch_available = MagicMock(return_value=False )
        with patch('''transformers.onnx.features.is_tf_available''' , mock_tf_available ), patch(
            '''transformers.onnx.features.is_torch_available''' , mock_torch_available ):
            with self.assertRaises(EnvironmentError ):
                framework = FeaturesManager.determine_framework(self.test_model )
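if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the original test file):
    # with only PyTorch installed this resolves to "pt", with only TensorFlow
    # to "tf", and it raises when neither framework is available.
    print(FeaturesManager.determine_framework(SMALL_MODEL_IDENTIFIER))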
| 664 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
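# Illustrative two-stage usage of the pipelines exported above (a sketch, not
# part of the original module; it assumes the public
# "kandinsky-community/kandinsky-2-1" checkpoints on the Hub):
#
#   prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")
#   image_embeds, negative_image_embeds = prior("a red cat").to_tuple()
#   pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
#   image = pipe(
#       "a red cat", image_embeds=image_embeds, negative_image_embeds=negative_image_embeds
#   ).images[0]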
| 720 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests( PipelineTesterMixin ,unittest.TestCase ):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        '''latents''',
        '''num_images_per_prompt''',
        '''callback''',
        '''callback_steps''',
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
SCREAMING_SNAKE_CASE__ = False
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        transformer = Transformer2DModel(
            sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=True , activation_fn='''gelu-approximate''' , num_embeds_ada_norm=1_000 , norm_type='''ada_norm_zero''' , norm_elementwise_affine=False , )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {'''transformer''': transformer.eval(), '''vae''': vae.eval(), '''scheduler''': scheduler}
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''class_labels''': [1],
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs
    def test_inference( self ):
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 16, 16, 3) )
        expected_slice = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1E-3 )
    def test_inference_batch_single_identical( self ):
        self._test_inference_batch_single_identical(relax_max_difference=True , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def test_xformers_attention_forwardGenerator_pass( self ):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests( unittest.TestCase ):
    def tearDown( self ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_dit_256( self ):
        generator = torch.manual_seed(0 )
        pipe = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-256''' )
        pipe.to('''cuda''' )
        words = ['''vase''', '''umbrella''', '''white shark''', '''white wolf''']
        class_ids = pipe.get_label_ids(words )
        images = pipe(class_ids , generator=generator , num_inference_steps=40 , output_type='''np''' ).images
        for word, image in zip(words , images ):
            expected_image = load_numpy(
                F'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
            assert np.abs((expected_image - image).max() ) < 1E-2
    def test_dit_512( self ):
        pipe = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-512''' )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.to('''cuda''' )
        words = ['''vase''', '''umbrella''']
        class_ids = pipe.get_label_ids(words )
        generator = torch.manual_seed(0 )
        images = pipe(class_ids , generator=generator , num_inference_steps=25 , output_type='''np''' ).images
        for word, image in zip(words , images ):
            expected_image = load_numpy(
                '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
                F'''/dit/{word}_512.npy''' )
            assert np.abs((expected_image - image).max() ) < 1E-1
| 664 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
UpperCAmelCase_ = logging.get_logger(__name__)
def make_batched(videos: ImageInput ) -> List[List[ImageInput]]:
    if isinstance(videos , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos
    elif isinstance(videos , (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]
    elif is_valid_image(videos ):
        return [[videos]]
    raise ValueError(f'''Could not make batched video from {videos}''' )
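# A quick illustration of the three cases handled above (frames stand in for
# any valid image objects; illustrative only, not part of the original file):
#   make_batched(frame)                   -> [[frame]]            (single image)
#   make_batched([frame_a, frame_b])      -> [[frame_a, frame_b]] (one video)
#   make_batched([[frame_a], [frame_b]])  -> unchanged            (already a batch)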
class VideoMAEImageProcessor( BaseImageProcessor ):
    model_input_names = ["""pixel_values"""]
    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BILINEAR , do_center_crop = True , crop_size = None , do_rescale = True , rescale_factor = 1 / 255 , do_normalize = True , image_mean = None , image_std = None , **kwargs , ):
        super().__init__(**kwargs )
        size = size if size is not None else {'''shortest_edge''': 224}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
        crop_size = get_size_dict(crop_size , param_name='''crop_size''' )
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image , size , resample = PILImageResampling.BILINEAR , data_format = None , **kwargs , ):
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image , size['''shortest_edge'''] , default_to_square=False )
        elif "height" in size and "width" in size:
            output_size = (size['''height'''], size['''width'''])
        else:
            raise ValueError(F'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image , size , data_format = None , **kwargs , ):
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
        return center_crop(image , size=(size['''height'''], size['''width''']) , data_format=data_format , **kwargs )
    def rescale( self , image , scale , data_format = None , **kwargs , ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image , mean , std , data_format = None , **kwargs , ):
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def _preprocess_image( self , image , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , data_format = ChannelDimension.FIRST , ):
        if do_resize and size is None or resample is None:
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # All transformations expect numpy arrays.
        image = to_numpy_array(image )
        if do_resize:
            image = self.resize(image=image , size=size , resample=resample )
        if do_center_crop:
            image = self.center_crop(image , size=crop_size )
        if do_rescale:
            image = self.rescale(image=image , scale=rescale_factor )
        if do_normalize:
            image = self.normalize(image=image , mean=image_mean , std=image_std )
        image = to_channel_dimension_format(image , data_format )
        return image
    def preprocess( self , videos , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='''crop_size''' )
        if not valid_images(videos ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        videos = make_batched(videos )
        videos = [
            [
                self._preprocess_image(
                    image=img , do_resize=do_resize , size=size , resample=resample , do_center_crop=do_center_crop , crop_size=crop_size , do_rescale=do_rescale , rescale_factor=rescale_factor , do_normalize=do_normalize , image_mean=image_mean , image_std=image_std , data_format=data_format , )
                for img in video
            ]
            for video in videos
        ]
        data = {'''pixel_values''': videos}
        return BatchFeature(data=data , tensor_type=return_tensors )
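if __name__ == "__main__":
    # Minimal usage sketch (illustrative only, not part of the original module):
    # preprocess one fake 8-frame video of 32x32 RGB images; the sizes below
    # are assumptions chosen for the demo.
    video = [np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8) for _ in range(8)]
    processor = VideoMAEImageProcessor(size={"shortest_edge": 16}, crop_size={"height": 16, "width": 16})
    batch = processor(video, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 8, 3, 16, 16) -> (batch, frames, channels, h, w)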
| 721 |
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
UpperCAmelCase_ = {"UserAgent": UserAgent().random}
def extract_user_profile(script ) -> dict:
    data = script.contents[0]
    info = json.loads(data[data.find('''{"config"''' ) : -1] )
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    def __init__( self , username ):
        self.url = F'''https://www.instagram.com/{username}/'''
        self.user_data = self.get_json()
    def get_json( self ):
        html = requests.get(self.url , headers=headers ).text
        scripts = BeautifulSoup(html , '''html.parser''' ).find_all('''script''' )
        try:
            return extract_user_profile(scripts[4] )
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3] )
def __repr__( self ):
return F'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self ):
return F'''{self.fullname} ({self.username}) is {self.biography}'''
    @property
    def username( self ):
        return self.user_data["username"]
    @property
    def fullname( self ):
        return self.user_data["full_name"]
    @property
    def biography( self ):
        return self.user_data["biography"]
    @property
    def email( self ):
        return self.user_data["business_email"]
    @property
    def website( self ):
        return self.user_data["external_url"]
    @property
    def number_of_followers( self ):
        return self.user_data["edge_followed_by"]["count"]
    @property
    def number_of_followings( self ):
        return self.user_data["edge_follow"]["count"]
    @property
    def number_of_posts( self ):
        return self.user_data["edge_owner_to_timeline_media"]["count"]
    @property
    def profile_picture_url( self ):
        return self.user_data["profile_pic_url_hd"]
    @property
    def is_verified( self ):
        return self.user_data["is_verified"]
    @property
    def is_private( self ):
        return self.user_data["is_private"]
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str = "github" )->None:
import os
if os.environ.get('''CI''' ):
return # test failing on GitHub Actions
_lowerCAmelCase = InstagramUser(_SCREAMING_SNAKE_CASE )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , _SCREAMING_SNAKE_CASE )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_5_0
assert instagram_user.number_of_followers > 1_2_0_0_0_0
assert instagram_user.number_of_followings > 1_5
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('''https://instagram.''' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ = InstagramUser("github")
print(instagram_user)
print(F"""{instagram_user.number_of_posts = }""")
print(F"""{instagram_user.number_of_followers = }""")
print(F"""{instagram_user.number_of_followings = }""")
print(F"""{instagram_user.email = }""")
print(F"""{instagram_user.website = }""")
print(F"""{instagram_user.profile_picture_url = }""")
print(F"""{instagram_user.is_verified = }""")
print(F"""{instagram_user.is_private = }""")
| 664 | 0 |
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
UpperCAmelCase_ = TypeVar("T")
UpperCAmelCase_ = TypeVar("U")
class DoubleLinkedListNode( Generic[T, U] ):
    def __init__( self , key , val ):
        self.key = key
        self.val = val
        self.next = None
        self.prev = None
def __repr__( self ):
return (
F'''Node: key: {self.key}, val: {self.val}, '''
F'''has next: {bool(self.next )}, has prev: {bool(self.prev )}'''
)
class DoubleLinkedList( Generic[T, U] ):
    def __init__( self ):
        self.head = DoubleLinkedListNode(None , None )
        self.rear = DoubleLinkedListNode(None , None )
        self.head.next, self.rear.prev = self.rear, self.head
def __repr__( self ):
        rep = ['''DoubleLinkedList''']
        node = self.head
        while node.next is not None:
            rep.append(str(node ) )
            node = node.next
        rep.append(str(self.rear ) )
        return ",\n ".join(rep )
    def add( self , node ):
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear
    def remove( self , node ):
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node
class LRUCache( Generic[T, U] ):
    # class variable mapping decorated functions to their LRUCache instance
    decorator_function_to_instance_map: dict = {}
    def __init__( self , capacity ):
        self.list = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache = {}
def __repr__( self ):
return (
F'''CacheInfo(hits={self.hits}, misses={self.miss}, '''
F'''capacity={self.capacity}, current size={self.num_keys})'''
)
def __contains__( self , _lowerCAmelCase ):
return key in self.cache
    def get( self , key ):
        # Note: pythonic interface would throw KeyError rather than return None
        if key in self.cache:
            self.hits += 1
            value_node = self.cache[key]
            node = self.list.remove(self.cache[key] )
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node )
            return node.val
        self.miss += 1
        return None
    def put( self , key , value ):
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node ) is not None
                )  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key , value )
            self.list.add(self.cache[key] )
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key] )
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node )
    @classmethod
    def decorator( cls , size = 128 ):
        def cache_decorator_inner(func ) -> Callable[..., U]:
            def cache_decorator_wrapper(*args ) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size )
                result = cls.decorator_function_to_instance_map[func].get(args[0] )
                if result is None:
                    result = func(*args )
                    cls.decorator_function_to_instance_map[func].put(args[0] , result )
                return result
            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]
            setattr(cache_decorator_wrapper , '''cache_info''' , cache_info )  # noqa: B010
            return cache_decorator_wrapper
        return cache_decorator_inner
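# Minimal usage sketch (illustrative; mirrors how the decorator is meant to be
# used): memoize a recursive Fibonacci with a capacity-100 LRU cache.
@LRUCache.decorator(100)
def fib(num: int) -> int:
    # without the cache this recursion is exponential in num
    if num in (1, 2):
        return 1
    return fib(num - 1) + fib(num - 2)
# After some calls, fib.cache_info() reports hits/misses and current size.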
if __name__ == "__main__":
import doctest
doctest.testmod()
| 700 |
def find_minimum_change(denominations: list[int] , value: str ) -> list[int]:
    total_value = int(value )
    # Initialize Result
    answer = []
    # Traverse through all denominations, largest first (greedy)
    for denomination in reversed(denominations ):
        # Use this denomination as many times as it still fits
        while total_value >= denomination:
            total_value -= denomination
            answer.append(denomination )  # Append to the "answer" array
    return answer
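# Worked example (greedy, largest denomination first; illustrative only):
#   find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987")
#   -> [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]
# Note: the greedy choice is optimal for canonical coin systems like the one
# above, but not for arbitrary denominations (e.g. [1, 3, 4] with change 6,
# where greedy yields [4, 1, 1] instead of the optimal [3, 3]).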
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"
    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())
        for i in range(0, n):
            denominations.append(int(input(F"""Denomination {i}: """).strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()
    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(F"""Following is minimal change for {value}: """)
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
| 664 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_flip_channel_order=True , ):
        size = size if size is not None else {'''shortest_edge''': 20}
        crop_size = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def prepare_image_processor_dict( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class MobileViTImageProcessingTest( ImageProcessingSavingTestMixin ,unittest.TestCase ):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = MobileViTImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing , '''size''' ) )
        self.assertTrue(hasattr(image_processing , '''do_center_crop''' ) )
        self.assertTrue(hasattr(image_processing , '''center_crop''' ) )
        self.assertTrue(hasattr(image_processing , '''do_flip_channel_order''' ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 20} )
        self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
        self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
    def test_batch_feature( self ):
        pass
    def test_call_pil( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_numpy( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_pytorch( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 701 |
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path , albert_config_file , pytorch_dump_path ):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file )
    print(f'''Building PyTorch model from configuration: {config}''' )
    model = AlbertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict() , pytorch_dump_path )
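# Example invocation (paths and script name are placeholders, not from the
# original file):
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./albert_base/model.ckpt-best \
#     --albert_config_file ./albert_base/albert_config.json \
#     --pytorch_dump_path ./albert_base_pytorch_model.bin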
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--albert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained ALBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 664 | 0 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None ,metadata={'''help''': '''Model type selected in the list: ''' + ''', '''.join(MODEL_TYPES )} )
    data_dir: str = field(
        default=None ,metadata={'''help''': '''The input data dir. Should contain the .json files for the SQuAD task.'''} )
    max_seq_length: int = field(
        default=1_2_8 ,metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } ,)
    doc_stride: int = field(
        default=1_2_8 ,metadata={'''help''': '''When splitting up a long document into chunks, how much stride to take between chunks.'''} ,)
    max_query_length: int = field(
        default=6_4 ,metadata={
            '''help''': (
                '''The maximum number of tokens for the question. Questions longer than this will '''
                '''be truncated to this length.'''
            )
        } ,)
    max_answer_length: int = field(
        default=3_0 ,metadata={
            '''help''': (
                '''The maximum length of an answer that can be generated. This is needed because the start '''
                '''and end predictions are not conditioned on one another.'''
            )
        } ,)
    overwrite_cache: bool = field(
        default=False ,metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
    version_2_with_negative: bool = field(
        default=False ,metadata={'''help''': '''If true, the SQuAD examples contain some that do not have an answer.'''} )
    null_score_diff_threshold: float = field(
        default=0.0 ,metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
    n_best_size: int = field(
        default=2_0 ,metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
    lang_id: int = field(
        default=0 ,metadata={
            '''help''': (
                '''language id of input for language-specific xlm models (see'''
                ''' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'''
            )
        } ,)
    threads: int = field(default=1 ,metadata={'''help''': '''multiple threads for converting example to features'''} )
class Split( Enum ):
    train = '''train'''
    dev = '''dev'''
class SquadDataset( Dataset ):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__( self , args , tokenizer , limit_length = None , mode = Split.train , is_language_sensitive = False , cache_dir = None , dataset_format = "pt" , ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode , str ):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError('''mode is not a valid split name''' )
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = '''v2''' if args.version_2_with_negative else '''v1'''
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , F'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''' , )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + '''.lock'''
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file )
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features['''features''']
                self.dataset = self.old_features.get('''dataset''' , None )
                self.examples = self.old_features.get('''examples''' , None )
                logger.info(
                    F'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        F'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'''
                        ''' future run''' )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir )
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir )
                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples , tokenizer=tokenizer , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=dataset_format , )
                start = time.time()
                torch.save(
                    {'''features''': self.features, '''dataset''': self.dataset, '''examples''': self.examples} , cached_features_file , )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    F'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self ):
return len(self.features )
    def __getitem__( self , i ):
        # Convert to Tensors and build dataset
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids , dtype=torch.long )
        attention_mask = torch.tensor(feature.attention_mask , dtype=torch.long )
        token_type_ids = torch.tensor(feature.token_type_ids , dtype=torch.long )
        cls_index = torch.tensor(feature.cls_index , dtype=torch.long )
        p_mask = torch.tensor(feature.p_mask , dtype=torch.float )
        is_impossible = torch.tensor(feature.is_impossible , dtype=torch.float )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': attention_mask,
            '''token_type_ids''': token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({'''cls_index''': cls_index, '''p_mask''': p_mask} )
            if self.args.version_2_with_negative:
                inputs.update({'''is_impossible''': is_impossible} )
            if self.is_language_sensitive:
                inputs.update({'''langs''': (torch.ones(input_ids.shape , dtype=torch.int64 ) * self.args.lang_id)} )
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position , dtype=torch.long )
            end_positions = torch.tensor(feature.end_position , dtype=torch.long )
            inputs.update({'''start_positions''': start_positions, '''end_positions''': end_positions} )
        return inputs
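# Minimal usage sketch (illustrative only; AutoTokenizer is not imported in
# this module):
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")
#   train_dataset = SquadDataset(args, tokenizer=tokenizer, mode=Split.train)
#   batch = train_dataset[0]  # dict of input_ids / attention_mask / ... tensors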
| 702 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
raise Exception("requires fairseq >= 1.0.0a")
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = "Hello world! cécé herlolip"
def convert_xlm_roberta_xl_checkpoint_to_pytorch(roberta_checkpoint_path: str , pytorch_dump_folder_path: str , classification_head: bool ):
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path )
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1e-5 , )
    if classification_head:
        config.num_labels = roberta.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
    print('''Our RoBERTa config:''' , config )
    model = XLMRobertaXLForSequenceClassification(config ) if classification_head else XLMRobertaXLForMaskedLM(config )
    model.eval()
# Now let's copy all the weights.
# Embeddings
_lowerCAmelCase = roberta_sent_encoder.embed_tokens.weight
_lowerCAmelCase = roberta_sent_encoder.embed_positions.weight
_lowerCAmelCase = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
_lowerCAmelCase = roberta_sent_encoder.layer_norm.weight
_lowerCAmelCase = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
_lowerCAmelCase = model.roberta.encoder.layer[i]
_lowerCAmelCase = roberta_sent_encoder.layers[i]
_lowerCAmelCase = layer.attention
_lowerCAmelCase = roberta_layer.self_attn_layer_norm.weight
_lowerCAmelCase = roberta_layer.self_attn_layer_norm.bias
# self attention
_lowerCAmelCase = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
_lowerCAmelCase = roberta_layer.self_attn.q_proj.weight
_lowerCAmelCase = roberta_layer.self_attn.q_proj.bias
_lowerCAmelCase = roberta_layer.self_attn.k_proj.weight
_lowerCAmelCase = roberta_layer.self_attn.k_proj.bias
_lowerCAmelCase = roberta_layer.self_attn.v_proj.weight
_lowerCAmelCase = roberta_layer.self_attn.v_proj.bias
# self-attention output
_lowerCAmelCase = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
_lowerCAmelCase = roberta_layer.self_attn.out_proj.weight
_lowerCAmelCase = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
_lowerCAmelCase = roberta_layer.final_layer_norm.weight
_lowerCAmelCase = roberta_layer.final_layer_norm.bias
# intermediate
_lowerCAmelCase = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
_lowerCAmelCase = roberta_layer.fca.weight
_lowerCAmelCase = roberta_layer.fca.bias
# output
_lowerCAmelCase = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
_lowerCAmelCase = roberta_layer.fca.weight
_lowerCAmelCase = roberta_layer.fca.bias
# end of layer
if classification_head:
_lowerCAmelCase = roberta.model.classification_heads['''mnli'''].dense.weight
_lowerCAmelCase = roberta.model.classification_heads['''mnli'''].dense.bias
_lowerCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.weight
_lowerCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
_lowerCAmelCase = roberta.model.encoder.lm_head.dense.weight
_lowerCAmelCase = roberta.model.encoder.lm_head.dense.bias
_lowerCAmelCase = roberta.model.encoder.lm_head.layer_norm.weight
_lowerCAmelCase = roberta.model.encoder.lm_head.layer_norm.bias
_lowerCAmelCase = roberta.model.encoder.lm_head.weight
_lowerCAmelCase = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
    input_ids = roberta.encode(SAMPLE_TEXT ).unsqueeze(0 )  # batch of size 1
    our_output = model(input_ids )[0]
    if classification_head:
        their_output = roberta.model.classification_heads['''mnli'''](roberta.extract_features(input_ids ) )
    else:
        their_output = roberta.model(input_ids )[0]
    print(our_output.shape , their_output.shape )
    max_absolute_diff = torch.max(torch.abs(our_output - their_output ) ).item()
    print(f'''max_absolute_diff = {max_absolute_diff}''' )  # ~ 1e-7
    success = torch.allclose(our_output , their_output , atol=1e-3 )
    print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
    if not success:
        raise Exception('''Something went wRoNg''' )
    pathlib.Path(pytorch_dump_folder_path ).mkdir(parents=True , exist_ok=True )
    print(f'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
    args = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
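# Example invocation (paths and script name are placeholders, not from the
# original file):
#   python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
#     --roberta_checkpoint_path ./xlm-roberta-xl \
#     --pytorch_dump_folder_path ./xlm-roberta-xl-pytorch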
| 664 | 0 |
'''simple docstring'''
from torch import nn
class ClassificationHead( nn.Module ):
    def __init__( self , class_size , embed_size ):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size , embed_size=None ) if False else nn.Linear(embed_size , class_size )
    def forward( self , hidden_state ):
        # hidden_state = nn.functional.relu(self.mlp1(hidden_state))
        # hidden_state = self.mlp2(hidden_state)
        logits = self.mlp(hidden_state )
        return logits
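# Minimal usage sketch (illustrative): score a batch of 4 hidden states of
# width 768 against 5 classes; note that `torch` itself is not imported here.
#   head = ClassificationHead(class_size=5, embed_size=768)
#   logits = head(torch.randn(4, 768))  # -> shape (4, 5)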
| 703 |
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput( BaseOutput ):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps , max_beta=0.999 , alpha_transform_type="cosine" , ):
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
class DDIMInverseScheduler( SchedulerMixin ,ConfigMixin ):
    order = 1
@register_to_config
    def __init__( self , num_train_timesteps = 1_000 , beta_start = 0.0_001 , beta_end = 0.02 , beta_schedule = "linear" , trained_betas = None , clip_sample = True , set_alpha_to_zero = True , steps_offset = 0 , prediction_type = "epsilon" , clip_sample_range = 1.0 , **kwargs , ):
        if kwargs.get('''set_alpha_to_one''' , None ) is not None:
            deprecation_message = (
                '''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'''
            )
            deprecate('''set_alpha_to_one''' , '''1.0.0''' , deprecation_message , standard_warn=False )
            set_alpha_to_zero = kwargs['''set_alpha_to_one''']
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas , dtype=torch.float32 )
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start , beta_end , num_train_timesteps , dtype=torch.float32 )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , num_train_timesteps , dtype=torch.float32 ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps )
        else:
            raise NotImplementedError(F'''{beta_schedule} does is not implemented for {self.__class__}''' )
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas , dim=0 )
        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0 , num_train_timesteps ).copy().astype(np.int64 ) )
    def scale_model_input( self , sample , timestep = None ):
        return sample
    def set_timesteps( self , num_inference_steps , device = None ):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                F'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'''
                F''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'''
                F''' maximal {self.config.num_train_timesteps} timesteps.''' )
        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0 , num_inference_steps ) * step_ratio).round().copy().astype(np.int64 )
        self.timesteps = torch.from_numpy(timesteps ).to(device )
        self.timesteps += self.config.steps_offset
    def step( self , model_output , timestep , sample , eta = 0.0 , use_clipped_model_output = False , variance_noise = None , return_dict = True , ):
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps
        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )
        beta_prod_t = 1 - alpha_prod_t
        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'''
                ''' `v_prediction`''' )
        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range , self.config.clip_sample_range )
        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample , pred_original_sample=pred_original_sample )
def __len__( self ):
return self.config.num_train_timesteps
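if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the original module): a
    # few inverse-DDIM steps on random data with a dummy "model" that always
    # predicts zero noise.
    scheduler = DDIMInverseScheduler(num_train_timesteps=1_000)
    scheduler.set_timesteps(4)
    sample = torch.randn(1, 4, 8, 8)
    for t in scheduler.timesteps:
        model_output = torch.zeros_like(sample)  # stand-in for a UNet prediction
        sample = scheduler.step(model_output, int(t), sample).prev_sample
    print(sample.shape)  # torch.Size([1, 4, 8, 8])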
| 664 | 0 |
UNIT_SYMBOL = {
"meter": "m",
"kilometer": "km",
"megametre": "Mm",
"gigametre": "Gm",
"terametre": "Tm",
"petametre": "Pm",
"exametre": "Em",
"zettametre": "Zm",
"yottametre": "Ym",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
"m": 0,
"km": 3,
"Mm": 6,
"Gm": 9,
"Tm": 1_2,
"Pm": 1_5,
"Em": 1_8,
"Zm": 2_1,
"Ym": 2_4,
}
def length_conversion(value: float , from_type: str , to_type: str ) -> float:
    from_sanitized = from_type.lower().strip('''s''' )
    to_sanitized = to_type.lower().strip('''s''' )
    from_sanitized = UNIT_SYMBOL.get(from_sanitized , from_sanitized )
    to_sanitized = UNIT_SYMBOL.get(to_sanitized , to_sanitized )
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f'''Invalid \'from_type\' value: {from_type!r}.\n'''
            f'''Conversion abbreviations are: {", ".join(METRIC_CONVERSION )}'''
        )
        raise ValueError(msg )
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f'''Invalid \'to_type\' value: {to_type!r}.\n'''
            f'''Conversion abbreviations are: {", ".join(METRIC_CONVERSION )}'''
        )
        raise ValueError(msg )
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10 , exponent )
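# Worked examples (consistent with the exponent table above; illustrative):
#   length_conversion(4, "meter", "kilometer")  -> 0.004        (4 * 10 ** (0 - 3))
#   length_conversion(1, "gigametre", "m")      -> 1000000000   (10 ** (9 - 0))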
if __name__ == "__main__":
from doctest import testmod
testmod()
| 704 |
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
"tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
"CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
"CpmAntForCausalLM",
"CpmAntModel",
"CpmAntPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
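if __name__ == "__main__":
    # Hedged usage sketch: with the lazy module installed via
    # `sys.modules[__name__] = _LazyModule(...)` above, a symbol is only
    # imported when first touched; building a config pulls in
    # configuration_cpmant alone.
    from transformers import CpmAntConfig

    config = CpmAntConfig()
    print(config.model_type )  # cpmant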
| 664 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"distilbert-base-uncased": 5_1_2,
"distilbert-base-uncased-distilled-squad": 5_1_2,
"distilbert-base-cased": 5_1_2,
"distilbert-base-cased-distilled-squad": 5_1_2,
"distilbert-base-german-cased": 5_1_2,
"distilbert-base-multilingual-cased": 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class UpperCAmelCase ( snake_case_ ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = DistilBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
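if __name__ == "__main__":
    # Hedged usage sketch of the fast tokenizer above (it mirrors
    # transformers.DistilBertTokenizerFast); the uncased checkpoint is
    # downloaded on first use.
    from transformers import DistilBertTokenizerFast

    tok = DistilBertTokenizerFast.from_pretrained('''distilbert-base-uncased''' )
    enc = tok('''Hello world!''' , return_token_type_ids=True )
    print(enc['''input_ids'''] )       # ids for [CLS] hello world ! [SEP]
    print(enc['''token_type_ids'''] )  # all zeros for a single sequence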
| 705 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase ( snake_case_ ):
    feature_extractor_class = '''ClapFeatureExtractor'''
    tokenizer_class = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
    def __init__( self , feature_extractor , tokenizer ):
        super().__init__(feature_extractor , tokenizer )
    def __call__( self , text=None , audios=None , return_tensors=None , **kwargs ):
        sampling_rate = kwargs.pop('''sampling_rate''' , None )
        if text is None and audios is None:
            raise ValueError('''You have to specify either text or audios. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if audios is not None:
            audio_features = self.feature_extractor(
                audios , sampling_rate=sampling_rate , return_tensors=return_tensors , **kwargs )
        if text is not None and audios is not None:
            encoding['''input_features'''] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
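if __name__ == "__main__":
    # Hedged usage sketch: the processor above mirrors transformers.ClapProcessor.
    # The checkpoint name and the 48 kHz rate are assumptions taken from the
    # public LAION CLAP release.
    import numpy as np
    from transformers import ClapProcessor

    processor = ClapProcessor.from_pretrained('''laion/clap-htsat-unfused''' )
    audio = np.random.randn(48_000 )  # one second of stand-in audio
    inputs = processor(text=['''a dog barking'''] , audios=audio , sampling_rate=48_000 , return_tensors='''pt''' )
    print(sorted(inputs.keys() ) )  # ['attention_mask', 'input_features', 'input_ids']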
| 664 | 0 |
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
model_classes = {
"b0": efficientnet.EfficientNetBa,
"b1": efficientnet.EfficientNetBa,
"b2": efficientnet.EfficientNetBa,
"b3": efficientnet.EfficientNetBa,
"b4": efficientnet.EfficientNetBa,
"b5": efficientnet.EfficientNetBa,
"b6": efficientnet.EfficientNetBa,
"b7": efficientnet.EfficientNetBa,
}
CONFIG_MAP = {
"b0": {
"hidden_dim": 1_2_8_0,
"width_coef": 1.0,
"depth_coef": 1.0,
"image_size": 2_2_4,
"dropout_rate": 0.2,
"dw_padding": [],
},
"b1": {
"hidden_dim": 1_2_8_0,
"width_coef": 1.0,
"depth_coef": 1.1,
"image_size": 2_4_0,
"dropout_rate": 0.2,
"dw_padding": [1_6],
},
"b2": {
"hidden_dim": 1_4_0_8,
"width_coef": 1.1,
"depth_coef": 1.2,
"image_size": 2_6_0,
"dropout_rate": 0.3,
"dw_padding": [5, 8, 1_6],
},
"b3": {
"hidden_dim": 1_5_3_6,
"width_coef": 1.2,
"depth_coef": 1.4,
"image_size": 3_0_0,
"dropout_rate": 0.3,
"dw_padding": [5, 1_8],
},
"b4": {
"hidden_dim": 1_7_9_2,
"width_coef": 1.4,
"depth_coef": 1.8,
"image_size": 3_8_0,
"dropout_rate": 0.4,
"dw_padding": [6],
},
"b5": {
"hidden_dim": 2_0_4_8,
"width_coef": 1.6,
"depth_coef": 2.2,
"image_size": 4_5_6,
"dropout_rate": 0.4,
"dw_padding": [1_3, 2_7],
},
"b6": {
"hidden_dim": 2_3_0_4,
"width_coef": 1.8,
"depth_coef": 2.6,
"image_size": 5_2_8,
"dropout_rate": 0.5,
"dw_padding": [3_1],
},
"b7": {
"hidden_dim": 2_5_6_0,
"width_coef": 2.0,
"depth_coef": 3.1,
"image_size": 6_0_0,
"dropout_rate": 0.5,
"dw_padding": [1_8],
},
}
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] )->Any:
_lowerCAmelCase = EfficientNetConfig()
_lowerCAmelCase = CONFIG_MAP[model_name]['''hidden_dim''']
_lowerCAmelCase = CONFIG_MAP[model_name]['''width_coef''']
_lowerCAmelCase = CONFIG_MAP[model_name]['''depth_coef''']
_lowerCAmelCase = CONFIG_MAP[model_name]['''image_size''']
_lowerCAmelCase = CONFIG_MAP[model_name]['''dropout_rate''']
_lowerCAmelCase = CONFIG_MAP[model_name]['''dw_padding''']
_lowerCAmelCase = '''huggingface/label-files'''
_lowerCAmelCase = '''imagenet-1k-id2label.json'''
_lowerCAmelCase = 1_0_0_0
_lowerCAmelCase = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='''dataset''' ) , '''r''' ) )
_lowerCAmelCase = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
_lowerCAmelCase = idalabel
_lowerCAmelCase = {v: k for k, v in idalabel.items()}
return config
def UpperCAmelCase__ ( )->str:
_lowerCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_lowerCAmelCase = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
return im
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[int] )->int:
_lowerCAmelCase = CONFIG_MAP[model_name]['''image_size''']
_lowerCAmelCase = EfficientNetImageProcessor(
size={'''height''': size, '''width''': size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.4785_3944, 0.473_2864, 0.4743_4163] , do_center_crop=_SCREAMING_SNAKE_CASE , )
return preprocessor
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict )->str:
_lowerCAmelCase = [v.split('''_''' )[0].split('''block''' )[1] for v in original_param_names if v.startswith('''block''' )]
_lowerCAmelCase = sorted(set(_SCREAMING_SNAKE_CASE ) )
_lowerCAmelCase = len(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = {b: str(_SCREAMING_SNAKE_CASE ) for b, i in zip(_SCREAMING_SNAKE_CASE , range(_SCREAMING_SNAKE_CASE ) )}
_lowerCAmelCase = []
rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') )
rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') )
rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') )
rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') )
rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') )
for b in block_names:
_lowerCAmelCase = block_name_mapping[b]
rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') )
rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') )
rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') )
rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') )
rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = '''efficientnet.''' + item[1]
    key_mapping['''predictions/kernel:0'''] = '''classifier.weight'''
    key_mapping['''predictions/bias:0'''] = '''classifier.bias'''
return key_mapping
def replace_params( hf_params , tf_params , key_mapping ):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(3 , 2 , 0 , 1 )
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(2 , 3 , 0 , 1 )
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value ) )
        else:
            new_hf_value = torch.from_numpy(value )
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value )
@torch.no_grad()
def convert_efficientnet_checkpoint( model_name , pytorch_dump_folder_path , save_model , push_to_hub ):
    original_model = model_classes[model_name](
        include_top=True , weights='''imagenet''' , input_tensor=None , input_shape=None , pooling=None , classes=1_0_0_0 , classifier_activation='''softmax''' , )
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys() )
    # Load HuggingFace model
    config = get_efficientnet_config(model_name )
    hf_model = EfficientNetForImageClassification(config ).eval()
    hf_params = hf_model.state_dict()
    # Create src-to-dst parameter name mapping dictionary
    print('''Converting parameters...''' )
    key_mapping = rename_keys(tf_param_names )
    replace_params(hf_params , tf_params , key_mapping )
    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name )
    inputs = preprocessor(images=prepare_img() , return_tensors='''pt''' )
    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs )
    hf_logits = outputs.logits.detach().numpy()
    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]['''image_size''']
    img = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
    x = image.img_to_array(img )
    x = np.expand_dims(x , axis=0 )
    original_logits = original_model.predict(x )
    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits , hf_logits , atol=1e-3 ), "The predicted logits are not the same."
    print('''Model outputs match!''' )
    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path ):
            os.mkdir(pytorch_dump_folder_path )
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path )
        preprocessor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        # Push model and image processor to hub
        print(f'''Pushing converted {model_name} to the hub...''' )
        model_name = f'''efficientnet-{model_name}'''
        preprocessor.push_to_hub(model_name )
        hf_model.push_to_hub(model_name )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="b0",
type=str,
help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="hf_model",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--save_model", action="store_true", help="Save model to local")
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
UpperCAmelCase_ = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
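    # Hedged standalone sketch of the layout change replace_params performs:
    # TF stores conv kernels as (kh, kw, in_ch, out_ch) and depthwise kernels
    # as (kh, kw, ch, mult); PyTorch wants (out_ch, in_ch, kh, kw) and
    # (ch * mult, 1, kh, kw), hence the two permutes. Shapes are illustrative.
    tf_kernel = np.zeros((3, 3, 1_6, 3_2), dtype=np.float32)
    assert torch.from_numpy(tf_kernel).permute(3, 2, 0, 1).shape == (3_2, 1_6, 3, 3)
    tf_dw_kernel = np.zeros((3, 3, 1_6, 1), dtype=np.float32)
    assert torch.from_numpy(tf_dw_kernel).permute(2, 3, 0, 1).shape == (1_6, 1, 3, 3)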
| 706 |
from __future__ import annotations
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : list )->list:
if len(_SCREAMING_SNAKE_CASE ) == 0:
return []
_lowerCAmelCase , _lowerCAmelCase = min(_SCREAMING_SNAKE_CASE ), max(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = int(max_value - min_value ) + 1
_lowerCAmelCase = [[] for _ in range(_SCREAMING_SNAKE_CASE )]
for i in my_list:
buckets[int(i - min_value )].append(_SCREAMING_SNAKE_CASE )
return [v for bucket in buckets for v in sorted(_SCREAMING_SNAKE_CASE )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
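    # Hedged extra check: bucket indices come from int(i - min_value), so the
    # sort also handles floats; same-bucket values are ordered by sorted().
    assert bucket_sort([0.4, 0.3, 1.7, 0.2]) == [0.2, 0.3, 0.4, 1.7]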
| 664 | 0 |
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : int = 1_0_0_0 )->int:
return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
if __name__ == "__main__":
print(solution())
| 707 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
UpperCAmelCase_ = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def UpperCAmelCase__ ( )->Any:
_lowerCAmelCase = _ask_options(
'''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
_lowerCAmelCase = get_sagemaker_input()
else:
_lowerCAmelCase = get_cluster_input()
return config
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : int=None )->str:
if subparsers is not None:
_lowerCAmelCase = subparsers.add_parser('''config''' , description=_SCREAMING_SNAKE_CASE )
else:
_lowerCAmelCase = argparse.ArgumentParser('''Accelerate config command''' , description=_SCREAMING_SNAKE_CASE )
parser.add_argument(
'''--config_file''' , default=_SCREAMING_SNAKE_CASE , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , )
if subparsers is not None:
        parser.set_defaults(func=config_command )
return parser
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict )->str:
_lowerCAmelCase = get_user_input()
if args.config_file is not None:
_lowerCAmelCase = args.config_file
else:
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
os.makedirs(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = default_yaml_config_file
if config_file.endswith('''.json''' ):
config.to_json_file(_SCREAMING_SNAKE_CASE )
else:
config.to_yaml_file(_SCREAMING_SNAKE_CASE )
print(f'''accelerate configuration saved at {config_file}''' )
def UpperCAmelCase__ ( )->List[Any]:
_lowerCAmelCase = config_command_parser()
_lowerCAmelCase = parser.parse_args()
config_command(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
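    # Hedged sketch: the standalone parser can also be built non-interactively
    # to inspect where the configuration file would be written (no prompts run).
    demo_parser = config_command_parser()
    demo_args = demo_parser.parse_args(['''--config_file''', '''./my_default_config.yaml'''])
    print(demo_args.config_file )  # ./my_default_config.yaml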
| 664 | 0 |
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Tuple )->int:
_lowerCAmelCase = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
        state_dict.pop(k , None )
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] )->Tuple:
_lowerCAmelCase = list(s_dict.keys() )
for key in keys:
if "transformer_layers" in key:
_lowerCAmelCase = s_dict.pop(_SCREAMING_SNAKE_CASE )
elif "subsample" in key:
_lowerCAmelCase = s_dict.pop(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[int] )->Any:
_lowerCAmelCase , _lowerCAmelCase = emb.weight.shape
_lowerCAmelCase = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = emb.weight.data
return lin_layer
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Tuple )->str:
_lowerCAmelCase = torch.load(_SCREAMING_SNAKE_CASE , map_location='''cpu''' )
_lowerCAmelCase = mam_aaa['''args''']
_lowerCAmelCase = mam_aaa['''model''']
_lowerCAmelCase = state_dict['''decoder.output_projection.weight''']
remove_ignore_keys_(_SCREAMING_SNAKE_CASE )
rename_keys(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = state_dict['''decoder.embed_tokens.weight'''].shape[0]
_lowerCAmelCase = args.share_decoder_input_output_embed
_lowerCAmelCase = [int(_SCREAMING_SNAKE_CASE ) for i in args.conv_kernel_sizes.split(''',''' )]
_lowerCAmelCase = SpeechaTextConfig(
vocab_size=_SCREAMING_SNAKE_CASE , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , num_conv_layers=len(_SCREAMING_SNAKE_CASE ) , conv_channels=args.conv_channels , conv_kernel_sizes=_SCREAMING_SNAKE_CASE , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=_SCREAMING_SNAKE_CASE , num_beams=5 , max_length=2_0_0 , use_cache=_SCREAMING_SNAKE_CASE , decoder_start_token_id=2 , early_stopping=_SCREAMING_SNAKE_CASE , )
_lowerCAmelCase = SpeechaTextForConditionalGeneration(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase , _lowerCAmelCase = model.model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0 and not set(_SCREAMING_SNAKE_CASE ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
'''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
f''' but all the following weights are missing {missing}''' )
if tie_embeds:
_lowerCAmelCase = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
_lowerCAmelCase = lm_head_weights
model.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
UpperCAmelCase_ = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
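    # Hedged standalone sketch of make_linear_from_emb above: the projection
    # shares storage with the embedding matrix instead of copying it.
    demo_emb = nn.Embedding(1_0_0 , 1_6 )
    demo_head = make_linear_from_emb(demo_emb )
    assert demo_head.weight.data_ptr() == demo_emb.weight.data_ptr()
    assert demo_head.weight.shape == (1_0_0, 1_6)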
| 708 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
UpperCAmelCase_ = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
UpperCAmelCase_ = 1_0
UpperCAmelCase_ = 2_5_6
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] )->Optional[MinHash]:
if len(_SCREAMING_SNAKE_CASE ) < MIN_NUM_TOKENS:
return None
_lowerCAmelCase = MinHash(num_perm=_SCREAMING_SNAKE_CASE )
for token in set(_SCREAMING_SNAKE_CASE ):
min_hash.update(token.encode() )
return min_hash
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str )->Set[str]:
return {t for t in NON_ALPHA.split(_SCREAMING_SNAKE_CASE ) if len(t.strip() ) > 0}
class UpperCAmelCase :
    def __init__( self , * , duplication_jaccard_threshold: float = 0.85 , ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
        self._duplicate_clusters = defaultdict(set )
    def add( self , code_key , min_hash ):
        close_duplicates = self._index.query(min_hash )
        if code_key in self._index.keys:
            print(F'''Duplicate key {code_key}''' )
            return
        self._index.insert(code_key , min_hash )
        if len(close_duplicates ) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key )
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key )
    def get_duplicate_clusters( self ):
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates )
            # reformat the cluster to be a list of dict
            cluster = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster]
            duplicate_clusters.append(cluster )
        return duplicate_clusters
def __lowerCAmelCase ( self , _lowerCAmelCase ):
_lowerCAmelCase = self.get_duplicate_clusters()
with open(_lowerCAmelCase , '''w''' ) as f:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str )->Optional[Any]:
_lowerCAmelCase , _lowerCAmelCase = element
_lowerCAmelCase = get_min_hash([t for t in NON_ALPHA.split(data['''content'''] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Type[Dataset] )->Any:
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(_SCREAMING_SNAKE_CASE , max_queue_size=1_0_0_0_0 ) , chunksize=1_0_0 , ):
if data is not None:
yield data
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Type[Dataset] , _SCREAMING_SNAKE_CASE : float )->str:
_lowerCAmelCase = DuplicationIndex(duplication_jaccard_threshold=_SCREAMING_SNAKE_CASE )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(_SCREAMING_SNAKE_CASE ) ) , max_queue_size=1_0_0 ) ):
di.add(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str )->float:
_lowerCAmelCase = get_tokens(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = get_tokens(_SCREAMING_SNAKE_CASE )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
_shared_dataset = None
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Any )->List[Any]:
_lowerCAmelCase = []
for elementa in cluster:
_lowerCAmelCase = _shared_dataset[elementa['''base_index''']]['''content''']
for elementa in extremes:
_lowerCAmelCase = _shared_dataset[elementa['''base_index''']]['''content''']
if jaccard_similarity(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
_lowerCAmelCase = 1
extremes.append(_SCREAMING_SNAKE_CASE )
return extremes
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : str )->Tuple:
global _shared_dataset
_lowerCAmelCase = dataset
_lowerCAmelCase = []
_lowerCAmelCase = partial(_find_cluster_extremes_shared , jaccard_threshold=_SCREAMING_SNAKE_CASE )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) , total=len(_SCREAMING_SNAKE_CASE ) , ):
extremes_list.append(_SCREAMING_SNAKE_CASE )
return extremes_list
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Type[Dataset] , _SCREAMING_SNAKE_CASE : float = 0.85 )->Tuple[Type[Dataset], List[List[Dict]]]:
_lowerCAmelCase = make_duplicate_clusters(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_lowerCAmelCase = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster}
_lowerCAmelCase = {}
_lowerCAmelCase = find_extremes(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for extremes in extremes_clusters:
for element in extremes:
_lowerCAmelCase = element
_lowerCAmelCase = duplicate_indices - set(extreme_dict.keys() )
_lowerCAmelCase = dataset.filter(lambda _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : idx not in remove_indices , with_indices=_SCREAMING_SNAKE_CASE )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
_lowerCAmelCase = element['''base_index'''] in extreme_dict
if element["is_extreme"]:
_lowerCAmelCase = extreme_dict[element['''base_index''']]['''copies''']
print(f'''Original dataset size: {len(_SCREAMING_SNAKE_CASE )}''' )
print(f'''Number of duplicate clusters: {len(_SCREAMING_SNAKE_CASE )}''' )
print(f'''Files in duplicate cluster: {len(_SCREAMING_SNAKE_CASE )}''' )
print(f'''Unique files in duplicate cluster: {len(_SCREAMING_SNAKE_CASE )}''' )
print(f'''Filtered dataset size: {len(_SCREAMING_SNAKE_CASE )}''' )
return ds_filter, duplicate_clusters
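if __name__ == "__main__":
    # Hedged standalone sketch of the MinHash/LSH machinery above, run on two
    # identical toy token lists (list length meets MIN_NUM_TOKENS).
    demo_tokens = ['''def''', '''add''', '''a''', '''b''', '''return''', '''a''', '''b''', '''x''', '''y''', '''z''']
    demo_lsh = MinHashLSH(threshold=0.85 , num_perm=NUM_PERM )
    demo_lsh.insert('''a''' , get_min_hash(demo_tokens ) )
    print(demo_lsh.query(get_min_hash(demo_tokens ) ) )  # ['a']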
| 664 | 0 |
import torch
from transformers import AutoModel
class UpperCAmelCase ( torch.nn.Module ):
def __init__( self , _lowerCAmelCase="sayef/fsner-bert-base-uncased" ):
super(_lowerCAmelCase , self ).__init__()
_lowerCAmelCase = AutoModel.from_pretrained(_lowerCAmelCase , return_dict=_lowerCAmelCase )
_lowerCAmelCase = torch.nn.CosineSimilarity(3 , 1E-08 )
_lowerCAmelCase = torch.nn.Softmax(dim=1 )
def __lowerCAmelCase ( self , **_lowerCAmelCase ):
return self.bert(**_lowerCAmelCase ).last_hidden_state
def __lowerCAmelCase ( self , _lowerCAmelCase ):
return token_embeddings.sum(2 , keepdim=_lowerCAmelCase )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=1 ):
return self.softmax(T * self.cos(_lowerCAmelCase , _lowerCAmelCase ) )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = W_supports['''sizes'''].tolist()
_lowerCAmelCase = W_supports['''start_token_id'''].item()
_lowerCAmelCase = W_supports['''end_token_id'''].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
_lowerCAmelCase = self.BERT(**_lowerCAmelCase )
_lowerCAmelCase = self.BERT(**_lowerCAmelCase )
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = W_supports['''input_ids'''] == start_token_id
_lowerCAmelCase = W_supports['''input_ids'''] == end_token_id
for i, size in enumerate(_lowerCAmelCase ):
if i == 0:
_lowerCAmelCase = 0
else:
_lowerCAmelCase = support_sizes[i - 1]
_lowerCAmelCase = S[s : s + size][start_token_masks[s : s + size]]
_lowerCAmelCase = S[s : s + size][end_token_masks[s : s + size]]
_lowerCAmelCase = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
_lowerCAmelCase = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
_lowerCAmelCase = torch.vstack((p_starts, p_start) )
_lowerCAmelCase = torch.vstack((p_ends, p_end) )
else:
_lowerCAmelCase = p_start
_lowerCAmelCase = p_end
return p_starts, p_ends
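if __name__ == "__main__":
    # Hedged standalone sketch of the scoring inside forward above: a softmax
    # over temperature-scaled cosine similarities between query and support
    # token embeddings (shapes are illustrative).
    q = torch.randn(1 , 5 , 8 )   # (batch, query_len, hidden)
    s = torch.randn(1 , 7 , 8 )   # (batch, support_len, hidden)
    sims = torch.nn.CosineSimilarity(3 , 1E-08 )(q.unsqueeze(2 ) , s.unsqueeze(1 ) )
    probs = torch.nn.Softmax(dim=1 )(1 * sims )  # normalise over query positions
    assert probs.shape == (1, 5, 7)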
| 709 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class UpperCAmelCase ( snake_case_ ):
    def __init__( self , dataset , process , params ):
        self.dataset = dataset
        self.process = process
        self.params = params
def __len__( self ):
return len(self.dataset )
    def __getitem__( self , i ):
        item = self.dataset[i]
        processed = self.process(item , **self.params )
        return processed
class UpperCAmelCase ( snake_case_ ):
    def __init__( self , loader , infer , params , loader_batch_size=None ):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size
        # Internal bookkeeping
        self._loader_batch_data = None
        self._loader_batch_index = None
def __len__( self ):
return len(self.loader )
def __iter__( self ):
        self.iterator = iter(self.loader )
return self
    def loader_batch_item( self ):
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
_lowerCAmelCase = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
_lowerCAmelCase = {}
for k, element in self._loader_batch_data.items():
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
# Convert ModelOutput to tuple first
_lowerCAmelCase = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
_lowerCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
_lowerCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_lowerCAmelCase , _lowerCAmelCase ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
_lowerCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
_lowerCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
_lowerCAmelCase = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                    # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
_lowerCAmelCase = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                    # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
_lowerCAmelCase = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
_lowerCAmelCase = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
_lowerCAmelCase = self._loader_batch_data.__class__(_lowerCAmelCase )
self._loader_batch_index += 1
return result
    def __next__( self ):
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
_lowerCAmelCase = next(self.iterator )
_lowerCAmelCase = self.infer(_lowerCAmelCase , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(_lowerCAmelCase , torch.Tensor ):
_lowerCAmelCase = processed
else:
_lowerCAmelCase = list(processed.keys() )[0]
_lowerCAmelCase = processed[key]
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = len(_lowerCAmelCase )
else:
_lowerCAmelCase = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_lowerCAmelCase = observed_batch_size
# Setting internal index to unwrap the batch
_lowerCAmelCase = processed
_lowerCAmelCase = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class UpperCAmelCase ( snake_case_ ):
    def __init__( self , loader , infer , params , loader_batch_size=None ):
        super().__init__(loader , infer , params )
def __iter__( self ):
        self.iterator = iter(self.loader )
        self.subiterator = None
return self
    def __next__( self ):
if self.subiterator is None:
            self.subiterator = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
            processed = next(self.subiterator )
except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
# ChunkIterator will keep feeding until ALL elements of iterator
# all have created their subiterator and have been iterating against.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator ) , **self.params )
            processed = next(self.subiterator )
return processed
class UpperCAmelCase ( snake_case_ ):
def __iter__( self ):
_lowerCAmelCase = iter(self.loader )
return self
    def __next__( self ):
# Extremely similar to PipelineIterator in its unpacking mechanism
# BUT, we have an extra required item which is the presence of `is_last`
# That is because everything is flattened by `PipelineChunkIterator` we
# need to keep track of how to regroup here in the original `process`
# boundaries so that `process` and `postprocess` see the same data.
# This iterator accumulates items (possibly while unbatching) until it
        # hits an `is_last` and then just passes it on to the caller.
_lowerCAmelCase = False
_lowerCAmelCase = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
_lowerCAmelCase = self.loader_batch_item()
_lowerCAmelCase = item.pop('''is_last''' )
accumulator.append(_lowerCAmelCase )
if is_last:
return accumulator
while not is_last:
_lowerCAmelCase = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(_lowerCAmelCase , torch.Tensor ):
_lowerCAmelCase = processed
else:
_lowerCAmelCase = list(processed.keys() )[0]
_lowerCAmelCase = processed[key]
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = len(_lowerCAmelCase )
else:
_lowerCAmelCase = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_lowerCAmelCase = observed_batch_size
_lowerCAmelCase = processed
_lowerCAmelCase = 0
while self._loader_batch_index < self.loader_batch_size:
_lowerCAmelCase = self.loader_batch_item()
_lowerCAmelCase = item.pop('''is_last''' )
accumulator.append(_lowerCAmelCase )
if is_last:
return accumulator
else:
_lowerCAmelCase = processed
_lowerCAmelCase = item.pop('''is_last''' )
accumulator.append(_lowerCAmelCase )
return accumulator
class UpperCAmelCase ( snake_case_ ):
    def __init__( self , dataset , key ):
        self.dataset = dataset
        self.key = key
def __len__( self ):
return len(self.dataset )
    def __getitem__( self , i ):
return self.dataset[i][self.key]
class UpperCAmelCase ( snake_case_ ):
    def __init__( self , dataset , key1 , key2 ):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2
    def __len__( self ):
        return len(self.dataset )
    def __getitem__( self , i ):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
| 664 | 0 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
            f''' {value.shape} for {full_name}''' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Any )->List[str]:
_lowerCAmelCase = []
_lowerCAmelCase = fairseq_model.state_dict()
_lowerCAmelCase = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
_lowerCAmelCase = False
if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == '''group''' , )
            is_used = True
else:
for key, mapped_key in MAPPING.items():
                mapped_key = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key):
# special case since naming is very similar
continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('''.''' )[-2]
                        mapped_key = mapped_key.replace('''*''' , layer_index )
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = '''weight'''
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                    continue
if not is_used:
            unused_weights.append(name )
logger.warning(f'''Unused weights: {unused_weights}''' )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(_SCREAMING_SNAKE_CASE )
@torch.no_grad()
def convert_unispeech_sat_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path )
    else:
        config = UniSpeechSatConfig()
    dict_path = ''''''
    if is_finetuned:
        hf_wavavec = UniSpeechSatForCTC(config )
    else:
        hf_wavavec = UniSpeechSatForPreTraining(config )
    model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
    model = model[0].eval()
    recursively_load_weights(model , hf_wavavec )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
UpperCAmelCase_ = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
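    # Hedged standalone sketch of the attribute walk in set_recursively above:
    # dotted keys are resolved one attribute at a time before assignment.
    demo = torch.nn.Sequential(torch.nn.Linear(4 , 4 ) )
    pointer = demo
    for attribute in '''0.weight'''.split('''.''' ):
        pointer = getattr(pointer , attribute )
    assert pointer.shape == (4, 4)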
| 710 |
import numpy
class TwoHiddenLayerNeuralNetwork :
    def __init__( self , input_array , output_array ):
        self.input_array = input_array
        # Random initial weights are assigned, where the first argument is the
        # number of nodes in the previous layer and the second argument is the
        # number of nodes in the next layer.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3 , 1 )
# Real output values provided.
        self.output_array = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape )
    def feedforward( self ):
        self.layer_between_input_and_first_hidden_layer = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
    def back_propagation( self ):
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
    def train( self , output , iterations , give_loss ):
for iteration in range(1 , iterations + 1 ):
            self.predicted_output = self.feedforward()
self.back_propagation()
if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward() ) )
print(F'''Iteration {iteration} Loss: {loss}''' )
    def predict( self , input_arr ):
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : numpy.ndarray )->numpy.ndarray:
return 1 / (1 + numpy.exp(-value ))
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : numpy.ndarray )->numpy.ndarray:
return (value) * (1 - (value))
def UpperCAmelCase__ ( )->int:
    test_input = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
        ) , dtype=numpy.float64 , )
# True output values for the given input values.
    test_output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.float64 )
# Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(
        input_array=test_input , output_array=test_output )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=test_output , iterations=1_0 , give_loss=False )
    return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.float64 ) )
if __name__ == "__main__":
example()
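    # Hedged numerical check of sigmoid_derivative above via a central finite
    # difference (note the helper expects sigmoid(x), not x, as its argument).
    x = 0.3
    h = 1e-6
    numeric = (sigmoid(x + h) - sigmoid(x - h)) / (2 * h)
    assert abs(numeric - sigmoid_derivative(sigmoid(x))) < 1e-8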
| 664 | 0 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any )->Optional[Any]:
_lowerCAmelCase , _lowerCAmelCase = image.size
_lowerCAmelCase , _lowerCAmelCase = (x - x % 3_2 for x in (w, h)) # resize to integer multiple of 32
_lowerCAmelCase = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] )
_lowerCAmelCase = np.array(_SCREAMING_SNAKE_CASE ).astype(np.floataa ) / 255.0
_lowerCAmelCase = image[None].transpose(0 , 3 , 1 , 2 )
_lowerCAmelCase = torch.from_numpy(_SCREAMING_SNAKE_CASE )
return 2.0 * image - 1.0
class UpperCAmelCase ( snake_case_ ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
super().__init__()
self.register_modules(vqvae=_lowerCAmelCase , unet=_lowerCAmelCase , scheduler=_lowerCAmelCase )
@torch.no_grad()
def __call__( self , _lowerCAmelCase = None , _lowerCAmelCase = 1 , _lowerCAmelCase = 100 , _lowerCAmelCase = 0.0 , _lowerCAmelCase = None , _lowerCAmelCase = "pil" , _lowerCAmelCase = True , ):
        if isinstance(image , PIL.Image.Image ):
            batch_size = 1
        elif isinstance(image , torch.Tensor ):
            batch_size = image.shape[0]
        else:
            raise ValueError(F'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image )}''' )
        if isinstance(image , PIL.Image.Image ):
            image = preprocess(image )
        height , width = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
_lowerCAmelCase = (batch_size, self.unet.config.in_channels // 2, height, width)
_lowerCAmelCase = next(self.unet.parameters() ).dtype
_lowerCAmelCase = randn_tensor(_lowerCAmelCase , generator=_lowerCAmelCase , device=self.device , dtype=_lowerCAmelCase )
_lowerCAmelCase = image.to(device=self.device , dtype=_lowerCAmelCase )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(_lowerCAmelCase , device=self.device )
_lowerCAmelCase = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
_lowerCAmelCase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_lowerCAmelCase = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_lowerCAmelCase = {}
if accepts_eta:
_lowerCAmelCase = eta
for t in self.progress_bar(_lowerCAmelCase ):
# concat latents and low resolution image in the channel dimension.
_lowerCAmelCase = torch.cat([latents, image] , dim=1 )
_lowerCAmelCase = self.scheduler.scale_model_input(_lowerCAmelCase , _lowerCAmelCase )
# predict the noise residual
_lowerCAmelCase = self.unet(_lowerCAmelCase , _lowerCAmelCase ).sample
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase = self.scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
# decode the image latents with the VQVAE
_lowerCAmelCase = self.vqvae.decode(_lowerCAmelCase ).sample
_lowerCAmelCase = torch.clamp(_lowerCAmelCase , -1.0 , 1.0 )
_lowerCAmelCase = image / 2 + 0.5
_lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_lowerCAmelCase = self.numpy_to_pil(_lowerCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowerCAmelCase )
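# --- Hedged usage sketch (added; not part of the original file) ---
# A minimal call pattern for the pipeline class above; `pipe` is assumed to
# be an instance built from pretrained VQModel / UNet2DModel / scheduler
# components, and `low_res_image` a PIL.Image supplied by the caller.
def _superresolution_call_sketch(pipe , low_res_image ):
    # eta only has an effect for DDIM-style schedulers, per the
    # signature-inspection logic in __call__ above.
    result = pipe(image=low_res_image , num_inference_steps=100 , eta=1.0 )
    return result.images[0]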
| 711 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
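# --- Hedged sketch (added; not part of the original file) ---
# The essence of the _LazyModule pattern above: defer importing heavy
# submodules until an attribute is first accessed. This toy version is a
# deliberate simplification of transformers' real _LazyModule.
import importlib
import types

class _TinyLazyModule(types.ModuleType):
    def __init__(self , name , import_structure ):
        super().__init__(name )
        # map each exported attribute back to the submodule defining it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self , attr ):
        # import the owning submodule on first access, then fetch the attribute
        module = importlib.import_module("." + self._attr_to_module[attr] , self.__name__ )
        return getattr(module , attr )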
| 664 | 0 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def __lowerCAmelCase ( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = AlbertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def __lowerCAmelCase ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
@require_flax
class UpperCAmelCase ( FlaxModelTesterMixin ,unittest.TestCase ):
    all_model_classes = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def __lowerCAmelCase ( self ):
        self.model_tester = FlaxAlbertModelTester(self )
@slow
def __lowerCAmelCase ( self ):
for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''albert-base-v2''' )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class UpperCAmelCase ( unittest.TestCase ):
@slow
    def __lowerCAmelCase ( self ):
        model = FlaxAlbertModel.from_pretrained('''albert-base-v2''' )
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape , expected_shape )
        expected_slice = np.array(
            [[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) )
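# --- Hedged sketch (added; not part of the original tests) ---
# Shape-checking a randomly initialized model without network access, in the
# same spirit as the tester above; the tiny config values are illustrative
# assumptions, not a reference configuration.
def _albert_shape_check_sketch():
    config = AlbertConfig(vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 )
    model = FlaxAlbertModel(config )
    hidden_states = model(np.ones((1, 7) , dtype='''i4''' ) )[0]
    assert hidden_states.shape == (1, 7, 32)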
| 712 |
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory( *objects : Tuple )->List[Any]:
    if not isinstance(objects , list ):
        objects = list(objects )
    for i in range(len(objects ) ):
        objects[i] = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
def should_reduce_batch_size( exception : Exception )->bool:
    _statements = [
        '''CUDA out of memory.''', # CUDA OOM
        '''cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.''', # CUDNN SNAFU
        '''DefaultCPUAllocator: can\'t allocate memory''', # CPU OOM
    ]
    if isinstance(exception , RuntimeError ) and len(exception.args ) == 1:
return any(err in exception.args[0] for err in _statements )
return False
def find_executable_batch_size( function : callable = None , starting_batch_size : int = 1_2_8 )->Optional[int]:
    if function is None:
        return functools.partial(find_executable_batch_size , starting_batch_size=starting_batch_size )
    batch_size = starting_batch_size
    def decorator(*args : Optional[int] , **kwargs : Optional[Any] ):
        nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
        params = list(inspect.signature(function ).parameters.keys() )
# Guard against user error
        if len(params ) < (len(args ) + 1):
            arg_str = ''', '''.join([f'''{arg}={value}''' for arg, value in zip(params[1:] , args[1:] )] )
raise TypeError(
f'''Batch size was passed into `{function.__name__}` as the first argument when called.'''
f'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''' )
while True:
if batch_size == 0:
raise RuntimeError('''No executable batch size found, reached zero.''' )
try:
                return function(batch_size , *args , **kwargs )
except Exception as e:
                if should_reduce_batch_size(e ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator
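# --- Hedged usage sketch (added; not part of the original module) ---
# Typical application of the decorator above: the wrapped function's first
# parameter receives the probed batch size, which is halved whenever an
# OOM-like error is raised. The body below is an illustrative stand-in.
@find_executable_batch_size(starting_batch_size=4 )
def _demo_training_loop(batch_size ):
    # A real loop would build dataloaders and train at `batch_size` here.
    return batch_size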
| 664 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
"tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
"TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"AdaptiveEmbedding",
"TransfoXLForSequenceClassification",
"TransfoXLLMHeadModel",
"TransfoXLModel",
"TransfoXLPreTrainedModel",
"load_tf_weights_in_transfo_xl",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
"TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFAdaptiveEmbedding",
"TFTransfoXLForSequenceClassification",
"TFTransfoXLLMHeadModel",
"TFTransfoXLMainLayer",
"TFTransfoXLModel",
"TFTransfoXLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 713 |
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester :
    def __init__( self , parent , batch_size=2 , seq_length=8 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=16 , num_hidden_layers=5 , num_attention_heads=2 , intermediate_size=36 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
def __lowerCAmelCase ( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self ):
return MraConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
def __lowerCAmelCase ( self ):
        config = self.get_config()
        config.vocab_size = 300
return config
def __lowerCAmelCase ( self ):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def __lowerCAmelCase ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MraModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def __lowerCAmelCase ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        config.add_cross_attention = True
        model = MraModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def __lowerCAmelCase ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MraForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def __lowerCAmelCase ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MraForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def __lowerCAmelCase ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def __lowerCAmelCase ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def __lowerCAmelCase ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCAmelCase ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( ModelTesterMixin ,unittest.TestCase ):
    all_model_classes = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
    def __lowerCAmelCase ( self ):
        self.model_tester = MraModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MraConfig , hidden_size=37 )
def __lowerCAmelCase ( self ):
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def __lowerCAmelCase ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowerCAmelCase )
@slow
def __lowerCAmelCase ( self ):
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip(reason='''MRA does not output attentions''' )
def __lowerCAmelCase ( self ):
return
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self ):
        model = MraModel.from_pretrained('''uw-madison/mra-base-512-4''' )
        input_ids = torch.arange(256 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 256, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.0_140, 0.0_830, -0.0_381], [0.1_546, 0.1_402, 0.0_220], [0.1_162, 0.0_851, 0.0_165]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
@slow
def __lowerCAmelCase ( self ):
        model = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''' )
        input_ids = torch.arange(256 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        vocab_size = 50_265
        expected_shape = torch.Size((1, 256, vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[9.2_595, -3.6_038, 11.8_819], [9.3_869, -3.2_693, 11.0_956], [11.8_524, -3.4_938, 13.1_210]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
@slow
def __lowerCAmelCase ( self ):
        model = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''' )
        input_ids = torch.arange(4_096 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        vocab_size = 50_265
        expected_shape = torch.Size((1, 4_096, vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[5.4_789, -2.3_564, 7.5_064], [7.9_067, -1.3_369, 9.9_668], [9.0_712, -1.8_106, 7.0_380]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
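# --- Hedged sketch (added; not part of the original tests) ---
# A network-free smoke test in the same spirit as MraModelTester above; the
# tiny config values mirror the tester defaults and are illustrative only
# (MRA's custom attention kernels may constrain usable configurations).
def _mra_smoke_test_sketch():
    config = MraConfig(vocab_size=99 , hidden_size=16 , num_hidden_layers=2 , num_attention_heads=2 , intermediate_size=36 )
    model = MraModel(config )
    model.eval()
    with torch.no_grad():
        last_hidden_state = model(torch.ones((1, 8) , dtype=torch.long ) )[0]
    assert last_hidden_state.shape == (1, 8, 16)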
| 664 | 0 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase ( ProcessorMixin ):
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''BlipImageProcessor'''
    tokenizer_class = '''AutoTokenizer'''
    def __init__( self , image_processor , tokenizer ):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , images = None , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_token_type_ids = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ):
        if images is None and text is None:
            raise ValueError('''You have to specify either images or text.''' )
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding )
return encoding_image_processor
def __lowerCAmelCase ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase )
def __lowerCAmelCase ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def __lowerCAmelCase ( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 714 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 664 | 0 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = "\\n Text data.\n Second line of data."
FILE_PATH = "file"
@pytest.fixture(scope='''session''' )
def zstd_path( tmp_path_factory : Optional[Any] )->str:
    path = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''')
    data = bytes(FILE_CONTENT , '''utf-8''' )
    with zstd.open(path , '''wb''' ) as f:
        f.write(data )
    return path
@pytest.fixture
def tmpfs_file( tmpfs : Dict )->int:
    with open(os.path.join(tmpfs.local_root_dir , FILE_PATH ) , '''w''' ) as f:
        f.write(FILE_CONTENT )
    return FILE_PATH
@pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] )
def test_cached_path_extract( compression_format : Any , gz_file : Dict , xz_file : Tuple , zstd_path : List[Any] , tmp_path : Optional[int] , text_file : Optional[int] )->Optional[Any]:
    input_paths = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / '''cache'''
    download_config = DownloadConfig(cache_dir=cache_dir , extract_compressed_file=True )
    extracted_path = cached_path(input_path , download_config=download_config )
    with open(extracted_path ) as f:
        extracted_file_content = f.read()
    with open(text_file ) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('''default_extracted''' , [True, False] )
@pytest.mark.parametrize('''default_cache_dir''' , [True, False] )
def test_extracted_datasets_path( default_extracted : str , default_cache_dir : Tuple , xz_file : Any , tmp_path : int , monkeypatch : Tuple )->int:
    custom_cache_dir = '''custom_cache'''
    custom_extracted_dir = '''custom_extracted_dir'''
    custom_extracted_path = tmp_path / '''custom_extracted_path'''
    if default_extracted:
        expected = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''')
    else:
        monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , custom_extracted_dir )
        monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(custom_extracted_path ) )
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True )
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=True )
    )
    extracted_file_path = cached_path(filename , download_config=download_config )
    assert Path(extracted_file_path ).parent.parts[-2:] == expected
def test_cached_path_local( text_file : Union[str, Any] )->Union[str, Any]:
    # absolute path
    text_file = str(Path(text_file ).resolve() )
    assert cached_path(text_file ) == text_file
    # relative path
    text_file = str(Path(text_file ).resolve().relative_to(Path(os.getcwd() ) ) )
    assert cached_path(text_file ) == text_file
def test_cached_path_missing_local( tmp_path : Any )->int:
    # absolute path
    missing_file = str(tmp_path.resolve() / '''__missing_file__.txt''' )
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
    # relative path
    missing_file = '''./__missing_file__.txt'''
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
def test_get_from_cache_fsspec( tmpfs_file : Any )->Tuple:
    output_file = get_from_cache(f'''tmp://{tmpfs_file}''' )
    with open(output_file ) as f:
        output_file_content = f.read()
assert output_file_content == FILE_CONTENT
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , True )
def test_cached_path_offline( )->Optional[int]:
    with pytest.raises(OfflineModeIsEnabled ):
        cached_path('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , True )
def test_http_offline( tmp_path_factory : int )->Optional[int]:
    filename = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
    with pytest.raises(OfflineModeIsEnabled ):
        http_get('''https://huggingface.co''' , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        http_head('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , True )
def test_ftp_offline( tmp_path_factory : Any )->List[Any]:
    filename = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_get('''ftp://huggingface.co''' , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_head('''ftp://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , True )
def test_fsspec_offline( tmp_path_factory : List[str] )->Any:
    filename = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_get('''s3://huggingface.co''' , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_head('''s3://huggingface.co''' )
| 715 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , num_frames=10 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , crop_size=None , ):
        size = size if size is not None else {'''shortest_edge''': 18}
        crop_size = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size
def __lowerCAmelCase ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class UpperCAmelCase ( ImageProcessingSavingTestMixin ,unittest.TestCase ):
    image_processing_class = VivitImageProcessor if is_vision_available() else None
def __lowerCAmelCase ( self ):
        self.image_processor_tester = VivitImageProcessingTester(self )
@property
def __lowerCAmelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(_lowerCAmelCase , '''image_std''' ) )
self.assertTrue(hasattr(_lowerCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(_lowerCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(_lowerCAmelCase , '''do_center_crop''' ) )
self.assertTrue(hasattr(_lowerCAmelCase , '''size''' ) )
def __lowerCAmelCase ( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 18} )
        self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
        self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def __lowerCAmelCase ( self ):
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
# create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester , equal_resolution=False )
        for video in video_inputs:
            self.assertIsInstance(video , list )
            self.assertIsInstance(video[0] , Image.Image )
# Test not batched input
        encoded_videos = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_videos = image_processing(video_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __lowerCAmelCase ( self ):
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for video in video_inputs:
            self.assertIsInstance(video , list )
            self.assertIsInstance(video[0] , np.ndarray )
# Test not batched input
        encoded_videos = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_videos = image_processing(video_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __lowerCAmelCase ( self ):
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for video in video_inputs:
            self.assertIsInstance(video , list )
            self.assertIsInstance(video[0] , torch.Tensor )
# Test not batched input
        encoded_videos = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_videos = image_processing(video_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
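# --- Hedged sketch (added; not part of the original tests) ---
# The tests above assert a (batch, frames, channels, height, width) tensor
# layout. One input "video" is a list of per-frame channels-first arrays,
# which prepare_video_inputs(numpify=True) produces internally; the helper
# below builds an equivalent dummy video directly with numpy.
def _dummy_video_sketch(num_frames=10 , height=18 , width=18 ):
    return [np.random.randint(0 , 255 , (3, height, width) , dtype=np.uint8 ) for _ in range(num_frames )]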
| 664 | 0 |
def count_divisors( n : int )->int:
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors
def solution( )->int:
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num ) > 5_0_0:
            break
    return t_num
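# --- Hedged check (added; not part of the original solution) ---
# 28 = 1 + 2 + ... + 7 is the first triangular number with more than five
# divisors (1, 2, 4, 7, 14, 28), so the helper above should report 6:
assert count_divisors(28 ) == 6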
if __name__ == "__main__":
print(solution())
| 716 |
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_CITATION = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
_DESCRIPTION = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
_KWARGS_DESCRIPTION = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n    sources: list of source sentences where each sentence should be a string.\n    predictions: list of predicted sentences where each sentence should be a string.\n    references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n    sari: sari score\n    sacrebleu: sacrebleu score\n    exact: exact score\n\nExamples:\n    >>> sources=[\"About 95 species are currently accepted .\"]\n    >>> predictions=[\"About 95 you now get in .\"]\n    >>> references=[[\"About 95 species are currently known .\"]]\n    >>> wiki_split = datasets.load_metric(\"wiki_split\")\n    >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n    >>> print(results)\n    {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
def normalize_answer( text : List[Any] )->Optional[Any]:
    def remove_articles(text : List[str] ):
        regex = re.compile(R'''\b(a|an|the)\b''' , re.UNICODE )
        return re.sub(regex , ''' ''' , text )
    def white_space_fix(text : List[Any] ):
        return " ".join(text.split() )
    def remove_punc(text : Optional[Any] ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text : Optional[int] ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(text ) ) ) )
def compute_exact( a_gold : Optional[Any] , a_pred : List[Any] )->Any:
    return int(normalize_answer(a_gold ) == normalize_answer(a_pred ) )
def compute_em( predictions : Union[str, Any] , references : str )->int:
    scores = [any(compute_exact(ref , pred ) for ref in refs ) for pred, refs in zip(predictions , references )]
    return (sum(scores ) / len(scores )) * 1_0_0
def SARIngram( sgrams , cgrams , rgramslist , numref )->Optional[int]:
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall )
    sgramcounter = Counter(sgrams )
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref
    cgramcounter = Counter(cgrams )
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref
    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter
    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep ) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep )
    if len(keepgramcounterall_rep ) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values() )
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcountergood_rep ) > 0:
        delscore_precision = deltmpscore1 / len(delgramcountergood_rep )
    # ADDITION
    addgramcounter = set(cgramcounter ) - set(sgramcounter )
    addgramcountergood = set(addgramcounter ) & set(rgramcounter )
    addgramcounterall = set(rgramcounter ) - set(sgramcounter )
    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter ) > 0:
        addscore_precision = addtmpscore / len(addgramcounter )
    if len(addgramcounterall ) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall )
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
    return (keepscore, delscore_precision, addscore)
def SARIsent( ssent , csent , rsents )->List[Any]:
    numref = len(rsents )
    s1grams = ssent.split(''' ''' )
    c1grams = csent.split(''' ''' )
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(''' ''' )
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams )
        for i in range(0 , len(r1grams ) - 1 ):
            if i < len(r1grams ) - 1:
                r2gram = r1grams[i] + ''' ''' + r1grams[i + 1]
                r2grams.append(r2gram )
            if i < len(r1grams ) - 2:
                r3gram = r1grams[i] + ''' ''' + r1grams[i + 1] + ''' ''' + r1grams[i + 2]
                r3grams.append(r3gram )
            if i < len(r1grams ) - 3:
                r4gram = r1grams[i] + ''' ''' + r1grams[i + 1] + ''' ''' + r1grams[i + 2] + ''' ''' + r1grams[i + 3]
                r4grams.append(r4gram )
        r2gramslist.append(r2grams )
        r3gramslist.append(r3grams )
        r4gramslist.append(r4grams )
    for i in range(0 , len(s1grams ) - 1 ):
        if i < len(s1grams ) - 1:
            s2gram = s1grams[i] + ''' ''' + s1grams[i + 1]
            s2grams.append(s2gram )
        if i < len(s1grams ) - 2:
            s3gram = s1grams[i] + ''' ''' + s1grams[i + 1] + ''' ''' + s1grams[i + 2]
            s3grams.append(s3gram )
        if i < len(s1grams ) - 3:
            s4gram = s1grams[i] + ''' ''' + s1grams[i + 1] + ''' ''' + s1grams[i + 2] + ''' ''' + s1grams[i + 3]
            s4grams.append(s4gram )
    for i in range(0 , len(c1grams ) - 1 ):
        if i < len(c1grams ) - 1:
            c2gram = c1grams[i] + ''' ''' + c1grams[i + 1]
            c2grams.append(c2gram )
        if i < len(c1grams ) - 2:
            c3gram = c1grams[i] + ''' ''' + c1grams[i + 1] + ''' ''' + c1grams[i + 2]
            c3grams.append(c3gram )
        if i < len(c1grams ) - 3:
            c4gram = c1grams[i] + ''' ''' + c1grams[i + 1] + ''' ''' + c1grams[i + 2] + ''' ''' + c1grams[i + 3]
            c4grams.append(c4gram )
    (keep1score , del1score , add1score) = SARIngram(s1grams , c1grams , r1gramslist , numref )
    (keep2score , del2score , add2score) = SARIngram(s2grams , c2grams , r2gramslist , numref )
    (keep3score , del3score , add3score) = SARIngram(s3grams , c3grams , r3gramslist , numref )
    (keep4score , del4score , add4score) = SARIngram(s4grams , c4grams , r4gramslist , numref )
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score] ) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score] ) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score] ) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize( sentence : str , lowercase : bool = True , tokenizer : str = "13a" , return_str : bool = True )->str:
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though Wiki-Auto and TURK datasets,
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()
    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__ ).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer )()(sentence )
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence )
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence , return_str=True , escape=False )
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence , return_str=True )
    else:
        normalized_sent = sentence
    if not return_str:
        normalized_sent = normalized_sent.split()
    return normalized_sent
def compute_sari( sources , predictions , references )->str:
    if not (len(sources ) == len(predictions ) == len(references )):
        raise ValueError('''Sources length must match predictions and references lengths.''' )
    sari_score = 0
    for src, pred, refs in zip(sources , predictions , references ):
        sari_score += SARIsent(normalize(src ) , normalize(pred ) , [normalize(sent ) for sent in refs] )
    sari_score = sari_score / len(predictions )
    return 1_0_0 * sari_score
def compute_sacrebleu( predictions , references , smooth_method="exp" , smooth_value=None , force=False , lowercase=False , use_effective_order=False , )->str:
    references_per_prediction = len(references[0] )
    if any(len(refs ) != references_per_prediction for refs in references ):
        raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
    output = sacrebleu.corpus_bleu(
        predictions , transformed_references , smooth_method=smooth_method , smooth_value=smooth_value , force=force , lowercase=lowercase , use_effective_order=use_effective_order , )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class UpperCAmelCase ( datasets.Metric ):
def __lowerCAmelCase ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=[
'''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
'''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
'''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
'''https://github.com/mjpost/sacreBLEU''',
] , reference_urls=[
'''https://www.aclweb.org/anthology/Q16-1029.pdf''',
'''https://github.com/mjpost/sacreBLEU''',
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
    def __lowerCAmelCase ( self , sources , predictions , references ):
        result = {}
        result.update({'''sari''': compute_sari(sources=sources , predictions=predictions , references=references )} )
        result.update({'''sacrebleu''': compute_sacrebleu(predictions=predictions , references=references )} )
        result.update({'''exact''': compute_em(predictions=predictions , references=references )} )
return result
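# --- Hedged example (added; not part of the original metric) ---
# Calling the sentence-level SARI helper directly, mirroring the docstring
# example above. SARIsent returns the unscaled score; compute_sari applies
# the *100 scaling that yields the ~21.8 in the docstring.
def _sari_single_sentence_example():
    return SARIsent(
        normalize('''About 95 species are currently accepted .''' ) ,
        normalize('''About 95 you now get in .''' ) ,
        [normalize('''About 95 species are currently known .''' )] , )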
| 664 | 0 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
"merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"ctrl": 2_5_6,
}
CONTROL_CODES = {
"Pregnancy": 1_6_8_6_2_9,
"Christianity": 7_6_7_5,
"Explain": 1_0_6_4_2_3,
"Fitness": 6_3_4_4_0,
"Saving": 6_3_1_6_3,
"Ask": 2_7_1_7_1,
"Ass": 9_5_9_8_5,
"Joke": 1_6_3_5_0_9,
"Questions": 4_5_6_2_2,
"Thoughts": 4_9_6_0_5,
"Retail": 5_2_3_4_2,
"Feminism": 1_6_4_3_3_8,
"Writing": 1_1_9_9_2,
"Atheism": 1_9_2_2_6_3,
"Netflix": 4_8_6_1_6,
"Computing": 3_9_6_3_9,
"Opinion": 4_3_2_1_3,
"Alone": 4_4_9_6_7,
"Funny": 5_8_9_1_7,
"Gaming": 4_0_3_5_8,
"Human": 4_0_8_8,
"India": 1_3_3_1,
"Joker": 7_7_1_3_8,
"Diet": 3_6_2_0_6,
"Legal": 1_1_8_5_9,
"Norman": 4_9_3_9,
"Tip": 7_2_6_8_9,
"Weight": 5_2_3_4_3,
"Movies": 4_6_2_7_3,
"Running": 2_3_4_2_5,
"Science": 2_0_9_0,
"Horror": 3_7_7_9_3,
"Confession": 6_0_5_7_2,
"Finance": 1_2_2_5_0,
"Politics": 1_6_3_6_0,
"Scary": 1_9_1_9_8_5,
"Support": 1_2_6_5_4,
"Technologies": 3_2_5_1_6,
"Teenage": 6_6_1_6_0,
"Event": 3_2_7_6_9,
"Learned": 6_7_4_6_0,
"Notion": 1_8_2_7_7_0,
"Wikipedia": 3_7_5_8_3,
"Books": 6_6_6_5,
"Extract": 7_6_0_5_0,
"Confessions": 1_0_2_7_0_1,
"Conspiracy": 7_5_9_3_2,
"Links": 6_3_6_7_4,
"Narcissus": 1_5_0_4_2_5,
"Relationship": 5_4_7_6_6,
"Relationships": 1_3_4_7_9_6,
"Reviews": 4_1_6_7_1,
"News": 4_2_5_6,
"Translation": 2_6_8_2_0,
"multilingual": 1_2_8_4_0_6,
}
def get_pairs(word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    pairs = set(pairs )
    return pairs
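# Illustrative behaviour (an assumption based on the implementation above): for a
# tuple of symbols, get_pairs returns the set of adjacent symbol pairs, e.g.
#   get_pairs(("l", "o", "w", "</w>")) == {("l", "o"), ("o", "w"), ("w", "</w>")}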
class UpperCAmelCase ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES
    def __init__( self , vocab_file , merges_file , unk_token="<unk>" , **kwargs ):
        super().__init__(unk_token=unk_token , **kwargs )
        with open(vocab_file , encoding='''utf-8''' ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file , encoding='''utf-8''' ) as merges_handle:
            merges = merges_handle.read().split('''\n''' )[1:-1]
        merges = [tuple(merge.split() ) for merge in merges]
        self.bpe_ranks = dict(zip(merges , range(len(merges ) ) ) )
        self.cache = {}
@property
def __lowerCAmelCase ( self ):
return len(self.encoder )
def __lowerCAmelCase ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        word = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = '''@@ '''.join(word )
        word = word[:-4]
        self.cache[token] = word
        return word
    def __lowerCAmelCase ( self , _lowerCAmelCase ):
        split_tokens = []
        words = re.findall(R'''\S+\n?''' , _lowerCAmelCase )
        for token in words:
            split_tokens.extend(list(self.bpe(token ).split(''' ''' ) ) )
        return split_tokens
def __lowerCAmelCase ( self , _lowerCAmelCase ):
return self.encoder.get(_lowerCAmelCase , self.encoder.get(self.unk_token ) )
def __lowerCAmelCase ( self , _lowerCAmelCase ):
return self.decoder.get(_lowerCAmelCase , self.unk_token )
    def __lowerCAmelCase ( self , _lowerCAmelCase ):
        out_string = ''' '''.join(_lowerCAmelCase ).replace('''@@ ''' , '''''' ).strip()
        return out_string
    def __lowerCAmelCase ( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(vocab_file , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '''\n''' )
        index = 0
        with open(merge_file , '''w''' , encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        ''' Please check that the tokenizer is not corrupted!''' )
                    index = token_index
                writer.write(''' '''.join(bpe_tokens ) + '''\n''' )
                index += 1
        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
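# Hedged usage sketch (assumes the public "ctrl" vocab/merges files are reachable;
# the prompt text is illustrative only):
#   tokenizer = UpperCAmelCase.from_pretrained("ctrl")
#   ids = tokenizer.encode("Diet I lost weight this year")
#   text = tokenizer.decode(ids)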
| 717 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase_ = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["DeiTFeatureExtractor"]
UpperCAmelCase_ = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
"DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DeiTForImageClassification",
"DeiTForImageClassificationWithTeacher",
"DeiTForMaskedImageModeling",
"DeiTModel",
"DeiTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
"TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDeiTForImageClassification",
"TFDeiTForImageClassificationWithTeacher",
"TFDeiTForMaskedImageModeling",
"TFDeiTModel",
"TFDeiTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
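# Illustrative note (assumed behaviour of the _LazyModule pattern above): heavy
# submodules are only imported on first attribute access, so
#   from transformers.models.deit import DeiTConfig
# does not pull in torch or TensorFlow until a backend-specific class is requested.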
| 664 | 0 |
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
MAPPING_SPEECH_ENCODER_PRENET = {
"speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
"speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
"speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
"speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
MAPPING_TEXT_ENCODER_PRENET = {
"text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
"text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
MAPPING_SPEECH_DECODER_PRENET = {
"speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
"speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
"speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
"speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
"speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
MAPPING_SPEECH_DECODER_POSTNET = {
"speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
"speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
"speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
"speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
"speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
"speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
"speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
"speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
"speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
"speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
"speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
"speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
MAPPING_TEXT_DECODER_PRENET = {
"text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
MAPPING_TEXT_DECODER_POSTNET = {
"text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
MAPPING_ENCODER = {
"encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
"encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
"encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
"encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
"encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
"encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
"encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
"encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
"encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
MAPPING_DECODER = {
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
"decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
"decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
"decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
"decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
"decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
"decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
"decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
"decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
"decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
"decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
"decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
"decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
"encoder.version",
"encoder.layers.*.norm_k.weight",
"encoder.layers.*.norm_k.bias",
"decoder.version",
"decoder.layers.*.norm_k.weight",
"decoder.layers.*.norm_k.bias",
"decoder.pos_emb.pe_k",
"speech_encoder_prenet.embed_positions._float_tensor",
"text_decoder_prenet.embed_positions._float_tensor",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"speech_decoder_prenet.*",
"speech_decoder_postnet.*",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
"encoder.proj",
"speech_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
def set_recursively(hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
            f''' {value.shape} for {full_name}''' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value
    logger.info(f'''{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.''' )
def should_ignore(name , ignore_keys ):
    for key in ignore_keys:
        if key.endswith('''.*''' ):
            if name.startswith(key[:-1] ):
                return True
        elif ".*." in key:
            prefix , suffix = key.split('''.*.''' )
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
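# Illustrative checks (assumptions derived from the rules above, not original tests):
#   should_ignore("encoder.proj.weight", ["encoder.proj"])                               # True: plain substring
#   should_ignore("text_decoder_prenet.embed_tokens.weight", ["text_decoder_prenet.*"])  # True: prefix wildcard
#   should_ignore("decoder.layers.3.norm_k.bias", ["decoder.layers.*.norm_k.bias"])      # True: ".*." prefix/suffix match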
def recursively_load_weights(fairseq_dict , hf_model , task ):
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f'''Unsupported task: {task}''' )
    for name, value in fairseq_dict.items():
        if should_ignore(name , IGNORE_KEYS ):
            logger.info(f'''{name} was ignored''' )
            continue
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_encoder , unused_weights , hf_model.config.feat_extract_norm == '''group''' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix , suffix = key.split('''.*.''' )
                    if prefix in name and suffix in name:
                        key = suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('''.''' )[-2]
                        mapped_key = mapped_key.replace('''*''' , layer_index )
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    elif "weight" in name:
                        weight_type = '''weight'''
                    elif "running_mean" in name:
                        weight_type = '''running_mean'''
                    elif "running_var" in name:
                        weight_type = '''running_var'''
                    elif "num_batches_tracked" in name:
                        weight_type = '''num_batches_tracked'''
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_speechta_checkpoint(task , checkpoint_path , pytorch_dump_folder_path , config_path=None , vocab_path=None , repo_id=None , ):
    if config_path is not None:
        config = SpeechTaConfig.from_pretrained(config_path )
    else:
        config = SpeechTaConfig()
    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechTaForSpeechToText(config )
    elif task == "t2s":
        config.max_speech_positions = 1_8_7_6
        config.max_text_positions = 6_0_0
        config.max_length = config.max_speech_positions
        model = SpeechTaForTextToSpeech(config )
    elif task == "s2s":
        config.max_speech_positions = 1_8_7_6
        config.max_length = config.max_speech_positions
        model = SpeechTaForSpeechToSpeech(config )
    else:
        raise ValueError(f'''Unknown task name: {task}''' )
    if vocab_path:
        tokenizer = SpeechTaTokenizer(vocab_path , model_max_length=config.max_text_positions )
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken('''<mask>''' , lstrip=True , rstrip=False )
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({'''mask_token''': mask_token} )
        tokenizer.add_tokens(['''<ctc_blank>'''] )
    feature_extractor = SpeechTaFeatureExtractor()
    processor = SpeechTaProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
    processor.save_pretrained(pytorch_dump_folder_path )
    fairseq_checkpoint = torch.load(checkpoint_path )
    recursively_load_weights(fairseq_checkpoint['''model'''] , model , task )
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print('''Pushing to the hub...''' )
        processor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
"--task",
default="s2t",
type=str,
help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
UpperCAmelCase_ = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
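# Hedged example invocation (the script name and all paths are placeholders):
#   python convert_speecht5_checkpoint.py \
#       --task t2s \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --vocab_path /path/to/spm_char.model \
#       --pytorch_dump_folder_path ./speecht5_tts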
| 718 |
def compute_ap(l ):  # noqa: E741
    n = len(l )
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n
    def dfs(root , at , parent , out_edge_count ):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root , to , at , out_edge_count )
                low[at] = min(low[at] , low[to] )
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at] , to )
        return out_edge_count
    for i in range(n ):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i , i , -1 , out_edge_count )
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art ) ):
        if is_art[x] is True:
            print(x )
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
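# For the sample graph above, the expected output is the articulation points 2, 3
# and 5 (this assumes DFS visits neighbours in increasing index order, which the
# index-as-discovery-time trick in compute_ap relies on).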
| 664 | 0 |
import csv
import tweepy
# Twitter API credentials
UpperCAmelCase_ = ""
UpperCAmelCase_ = ""
UpperCAmelCase_ = ""
UpperCAmelCase_ = ""
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str )->None:
# authorize twitter, initialize tweepy
_lowerCAmelCase = tweepy.OAuthHandler(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
auth.set_access_token(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_lowerCAmelCase = tweepy.API(_SCREAMING_SNAKE_CASE )
# initialize a list to hold all the tweepy Tweets
_lowerCAmelCase = []
# make initial request for most recent tweets (200 is the maximum allowed count)
_lowerCAmelCase = api.user_timeline(screen_name=_SCREAMING_SNAKE_CASE , count=2_0_0 )
# save most recent tweets
alltweets.extend(_SCREAMING_SNAKE_CASE )
# save the id of the oldest tweet less one
_lowerCAmelCase = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(_SCREAMING_SNAKE_CASE ) > 0:
print(f'''getting tweets before {oldest}''' )
# all subsequent requests use the max_id param to prevent duplicates
_lowerCAmelCase = api.user_timeline(
screen_name=_SCREAMING_SNAKE_CASE , count=2_0_0 , max_id=_SCREAMING_SNAKE_CASE )
# save most recent tweets
alltweets.extend(_SCREAMING_SNAKE_CASE )
# update the id of the oldest tweet less one
_lowerCAmelCase = alltweets[-1].id - 1
print(f'''...{len(_SCREAMING_SNAKE_CASE )} tweets downloaded so far''' )
# transform the tweepy tweets into a 2D array that will populate the csv
_lowerCAmelCase = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(f'''new_{screen_name}_tweets.csv''' , '''w''' ) as f:
_lowerCAmelCase = csv.writer(_SCREAMING_SNAKE_CASE )
writer.writerow(['''id''', '''created_at''', '''text'''] )
writer.writerows(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("FirePing32")
| 719 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class UpperCAmelCase ( TestCase ):
    def setUp( self ):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = '''pt'''
        self.framework_tf = '''tf'''
    def _setup_pt_ckpt( self , save_dir ):
        model_pt = AutoModel.from_pretrained(self.test_model )
        model_pt.save_pretrained(save_dir )
    def _setup_tf_ckpt( self , save_dir ):
        model_tf = TFAutoModel.from_pretrained(self.test_model , from_pt=True )
        model_tf.save_pretrained(save_dir )
    def __lowerCAmelCase ( self ):
        mock_framework = '''mock_framework'''
        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model , mock_framework )
        self.assertEqual(framework , mock_framework )
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            framework = FeaturesManager.determine_framework(local_pt_ckpt , mock_framework )
            self.assertEqual(framework , mock_framework )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            framework = FeaturesManager.determine_framework(local_tf_ckpt , mock_framework )
            self.assertEqual(framework , mock_framework )
    def __lowerCAmelCase ( self ):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            framework = FeaturesManager.determine_framework(local_pt_ckpt )
            self.assertEqual(framework , self.framework_pt )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            framework = FeaturesManager.determine_framework(local_tf_ckpt )
            self.assertEqual(framework , self.framework_tf )
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(OSError ):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt )
    def __lowerCAmelCase ( self ):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False )
        with patch('''transformers.onnx.features.is_tf_available''' , mock_tf_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_pt )
        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False )
        with patch('''transformers.onnx.features.is_torch_available''' , mock_torch_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_tf )
        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True )
        mock_torch_available = MagicMock(return_value=True )
        with patch('''transformers.onnx.features.is_tf_available''' , mock_tf_available ), patch(
            '''transformers.onnx.features.is_torch_available''' , mock_torch_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_pt )
        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False )
        mock_torch_available = MagicMock(return_value=False )
        with patch('''transformers.onnx.features.is_tf_available''' , mock_tf_available ), patch(
            '''transformers.onnx.features.is_torch_available''' , mock_torch_available ):
            with self.assertRaises(EnvironmentError ):
                framework = FeaturesManager.determine_framework(self.test_model )
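# Hedged note (assumed behaviour under test above): FeaturesManager.determine_framework
# prefers an explicitly provided framework, then a local checkpoint's own format, and
# finally the installed backend, defaulting to PyTorch when both torch and TF are present.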
| 664 | 0 |
from typing import List
from .keymap import KEYMAP, get_character
def UpperCAmelCase__ ( key ):
    def decorator(func ):
        handle = getattr(func , '''handle_key''' , [] )
        handle += [key]
        setattr(func , '''handle_key''' , handle )
        return func
    return decorator
def UpperCAmelCase__ ( *keys ):
    def decorator(func ):
        handle = getattr(func , '''handle_key''' , [] )
        handle += keys
        setattr(func , '''handle_key''' , handle )
        return func
    return decorator
class KeyHandler ( type ):
    def __new__( cls , name , bases , attrs ):
        new_cls = super().__new__(cls , name , bases , attrs )
        if not hasattr(new_cls , '''key_handler''' ):
            setattr(new_cls , '''key_handler''' , {} )
        setattr(new_cls , '''handle_input''' , KeyHandler.handle_input )
        for value in attrs.values():
            handled_keys = getattr(value , '''handle_key''' , [] )
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls
    @staticmethod
    def handle_input( cls ):
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char )
            handler = cls.key_handler.get(char )
            if handler:
                cls.current_selection = char
                return handler(cls )
            else:
                return None
def UpperCAmelCase__ ( cls ):
    return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
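# Hedged usage sketch (assumed wiring of the decorator and metaclass above; the
# Menu class and method are hypothetical):
#   class Menu(metaclass=KeyHandler):
#       @UpperCAmelCase__(KEYMAP["up"])  # the single-key decorator defined above
#       def move_up(cls):
#           ...
#   Menu.handle_input(Menu)  # reads one key press and dispatches to move_up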
| 720 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase ( PipelineTesterMixin ,unittest.TestCase ):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
'''latents''',
'''num_images_per_prompt''',
'''callback''',
'''callback_steps''',
}
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
SCREAMING_SNAKE_CASE__ = False
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        transformer = TransformeraDModel(
            sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=True , activation_fn='''gelu-approximate''' , num_embeds_ada_norm=1_000 , norm_type='''ada_norm_zero''' , norm_elementwise_affine=False , )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {'''transformer''': transformer.eval(), '''vae''': vae.eval(), '''scheduler''': scheduler}
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''class_labels''': [1],
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs
    def __lowerCAmelCase ( self ):
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 16, 16, 3) )
        expected_slice = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1E-3 )
    def __lowerCAmelCase ( self ):
        self._test_inference_batch_single_identical(relax_max_difference=True , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __lowerCAmelCase ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class UpperCAmelCase ( unittest.TestCase ):
    def tearDown( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def __lowerCAmelCase ( self ):
        generator = torch.manual_seed(0 )
        pipe = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-256''' )
        pipe.to('''cuda''' )
        words = ['''vase''', '''umbrella''', '''white shark''', '''white wolf''']
        ids = pipe.get_label_ids(words )
        images = pipe(ids , generator=generator , num_inference_steps=40 , output_type='''np''' ).images
        for word, image in zip(words , images ):
            expected_image = load_numpy(
                F'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
            assert np.abs((expected_image - image).max() ) < 1E-2
    def __lowerCAmelCase ( self ):
        pipe = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-512''' )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.to('''cuda''' )
        words = ['''vase''', '''umbrella''']
        ids = pipe.get_label_ids(words )
        generator = torch.manual_seed(0 )
        images = pipe(ids , generator=generator , num_inference_steps=25 , output_type='''np''' ).images
        for word, image in zip(words , images ):
            expected_image = load_numpy(
                '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
                F'''/dit/{word}_512.npy''' )
            assert np.abs((expected_image - image).max() ) < 1E-1
| 664 | 0 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
UpperCAmelCase_ = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
UpperCAmelCase_ = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
UpperCAmelCase_ = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class UpperCAmelCase ( datasets.Metric ):
def __lowerCAmelCase ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] , )
    def __lowerCAmelCase ( self , predictions , references , p_features=None , q_features=None , p_tokens=None , q_tokens=None , num_buckets="auto" , pca_max_data=-1 , kmeans_explained_var=0.9 , kmeans_num_redo=5 , kmeans_max_iter=500 , featurize_model_name="gpt2-large" , device_id=-1 , max_text_length=1_024 , divergence_curve_discretization_size=25 , mauve_scaling_factor=5 , verbose=True , seed=25 , ):
        out = compute_mauve(
            p_text=predictions , q_text=references , p_features=p_features , q_features=q_features , p_tokens=p_tokens , q_tokens=q_tokens , num_buckets=num_buckets , pca_max_data=pca_max_data , kmeans_explained_var=kmeans_explained_var , kmeans_num_redo=kmeans_num_redo , kmeans_max_iter=kmeans_max_iter , featurize_model_name=featurize_model_name , device_id=device_id , max_text_length=max_text_length , divergence_curve_discretization_size=divergence_curve_discretization_size , mauve_scaling_factor=mauve_scaling_factor , verbose=verbose , seed=seed , )
        return out
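# Hedged usage sketch (mirrors the docstring example above; featurization downloads
# a GPT-2 model, so this is illustrative rather than a fast test):
#   mauve = datasets.load_metric("mauve")
#   out = mauve.compute(predictions=["hello there"], references=["hello there"])
#   print(out.mauve)  # close to 1.0 for identical texts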
| 721 |
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
UpperCAmelCase_ = {"UserAgent": UserAgent().random}
def extract_user_profile(script )->dict:
    data = script.contents[0]
    info = json.loads(data[data.find('''{"config"''' ) : -1] )
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser :
    def __init__( self , username ):
        self.url = F'''https://www.instagram.com/{username}/'''
        self.user_data = self.get_json()
    def get_json( self ):
        html = requests.get(self.url , headers=headers ).text
        scripts = BeautifulSoup(html , '''html.parser''' ).find_all('''script''' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self ):
return F'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self ):
return F'''{self.fullname} ({self.username}) is {self.biography}'''
    @property
    def username( self ):
        return self.user_data["username"]
    @property
    def fullname( self ):
        return self.user_data["full_name"]
    @property
    def biography( self ):
        return self.user_data["biography"]
    @property
    def email( self ):
        return self.user_data["business_email"]
    @property
    def website( self ):
        return self.user_data["external_url"]
    @property
    def number_of_followers( self ):
        return self.user_data["edge_followed_by"]["count"]
    @property
    def number_of_followings( self ):
        return self.user_data["edge_follow"]["count"]
    @property
    def number_of_posts( self ):
        return self.user_data["edge_owner_to_timeline_media"]["count"]
    @property
    def profile_picture_url( self ):
        return self.user_data["profile_pic_url_hd"]
    @property
    def is_verified( self ):
        return self.user_data["is_verified"]
    @property
    def is_private( self ):
        return self.user_data["is_private"]
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str = "github" )->None:
import os
if os.environ.get('''CI''' ):
return # test failing on GitHub Actions
_lowerCAmelCase = InstagramUser(_SCREAMING_SNAKE_CASE )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , _SCREAMING_SNAKE_CASE )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_5_0
assert instagram_user.number_of_followers > 1_2_0_0_0_0
assert instagram_user.number_of_followings > 1_5
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('''https://instagram.''' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ = InstagramUser("github")
print(instagram_user)
print(F"""{instagram_user.number_of_posts = }""")
print(F"""{instagram_user.number_of_followers = }""")
print(F"""{instagram_user.number_of_followings = }""")
print(F"""{instagram_user.email = }""")
print(F"""{instagram_user.website = }""")
print(F"""{instagram_user.profile_picture_url = }""")
print(F"""{instagram_user.is_verified = }""")
print(F"""{instagram_user.is_private = }""")
| 664 | 0 |