| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 82–53.2k) | int64 (0–721) | string (lengths 91–41.9k) | int64 (0–699) | int64 (0–1) |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
SCREAMING_SNAKE_CASE_ = {
'configuration_ctrl': ['CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CTRLConfig'],
'tokenization_ctrl': ['CTRLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'CTRLForSequenceClassification',
'CTRLLMHeadModel',
'CTRLModel',
'CTRLPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCTRLForSequenceClassification',
'TFCTRLLMHeadModel',
'TFCTRLModel',
'TFCTRLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
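For context, the `_LazyModule` registered above defers the heavy framework imports until a symbol is first accessed. Below is a minimal sketch of that pattern; `LazyModule` is a simplified stand-in assuming only the standard `__getattr__`-based resolution, not the real transformers implementation.

# Minimal sketch of a _LazyModule-style loader: submodules are imported only
# on first attribute access. Assumes this module lives inside a package so
# that the relative import below resolves.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported symbol to the submodule that defines it.
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, name):
        if name not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        # The submodule is imported only now, on first attribute access.
        submodule = importlib.import_module(f".{self._symbol_to_module[name]}", self.__name__)
        value = getattr(submodule, name)
        setattr(self, name, value)  # cache for subsequent lookups
        return value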
[code_codestyle: 34]
"""simple docstring"""
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = [0 for i in range(len(_lowercase ) )]
# initialize interval's left pointer and right pointer
UpperCamelCase , UpperCamelCase = 0, 0
for i in range(1 ,len(_lowercase ) ):
# case when current index is inside the interval
if i <= right_pointer:
UpperCamelCase = min(right_pointer - i + 1 ,z_result[i - left_pointer] )
UpperCamelCase = min_edge
while go_next(_lowercase ,_lowercase ,_lowercase ):
z_result[i] += 1
# if new index's result gives us more right interval,
# we've to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
UpperCamelCase , UpperCamelCase = i, i + z_result[i] - 1
return z_result
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
return i + z_result[i] < len(_lowercase ) and s[z_result[i]] == s[i + z_result[i]]
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = 0
# concatenate 'pattern' and 'input_str' and call z_function
# with concatenated string
UpperCamelCase = z_function(pattern + input_str )
for val in z_result:
# if value is greater then length of the pattern string
# that means this index is starting position of substring
# which is equal to pattern string
if val >= len(_lowercase ):
answer += 1
return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
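A quick check of both helpers on a small input; the expected values below were worked out by hand.

assert z_function("abracadabra") == [0, 0, 0, 1, 0, 1, 0, 4, 0, 0, 1]
# "abr" occurs at indices 0 and 7 of "abracadabra"
assert find_pattern("abr", "abracadabra") == 2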
[style_context_codestyle: 34 | label: 1]
import unittest

from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor


if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"

    import jax
    import jax.numpy as jnp
    import numpy as np

    from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel


@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }


@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

        src_text = [
' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.',
' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ',
]
        tgt_text = [
'California\'s largest electricity provider has turned off power to hundreds of thousands of customers.',
'Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.',
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
[code_codestyle: 710]
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch

from transformers.testing_utils import TestCasePlus, require_torch_tpu


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)

    def test_trainer_tpu(self):
        import xla_spawn

        testargs = """
            ./tests/test_trainer_tpu.py
            --num_cores=8
            ./tests/test_trainer_tpu.py
            """.split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
[style_context_codestyle: 126 | label: 0]
"""Lazy import structure for the Time Series Transformer model."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
[code_codestyle: 577]
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    """Configuration class for a backbone loaded from the timm library."""

    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
[style_context_codestyle: 577 | label: 1]
import random


def rabin_miller(num: int) -> bool:
    """Probabilistic Rabin-Miller primality test (5 rounds)."""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True


def is_prime_low_num(num: int) -> bool:
    """Quick primality check: trial division by the primes below 1000, then Rabin-Miller."""
    if num < 2:
        return False

    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67,
        71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139,
        149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223,
        227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293,
        307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383,
        389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463,
        467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
        571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647,
        653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743,
        751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839,
        853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941,
        947, 953, 967, 971, 977, 983, 991, 997,
    ]

    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)


def generate_large_prime(keysize: int = 1024) -> int:
    """Generate a random prime with `keysize` bits."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
[code_codestyle: 713]
import copy
import json
import os
import tempfile

from transformers import is_torch_available

from .test_configuration_utils import config_common_kwargs


class ConfigTester:
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")

        # Test that config has the common properties as setters
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_tmpdirname = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")

    def run_common_tests(self):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
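A sketch of how this helper is typically wired into a model's test case; the test class name and the extra `hidden_size` kwarg are illustrative, following the common transformers test pattern.

# Hypothetical usage inside a unittest.TestCase for a config class.
import unittest

from transformers import BertConfig


class BertConfigTest(unittest.TestCase):
    def setUp(self):
        # Extra kwargs become self.inputs_dict and are round-tripped
        # through JSON / save_pretrained by run_common_tests().
        self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()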
[style_context_codestyle: 597 | label: 0]
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union

import datasets
import numpy as np
import torch
from datasets import load_dataset

import transformers
from transformers import (
    AutoConfig,
    AutoModelForMultipleChoice,
    AutoTokenizer,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    default_data_collator,
    set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."


@dataclass
class DataCollatorForMultipleChoice:
    """Data collator that dynamically pads the inputs for multiple choice."""

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).

    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).

    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag",
            "regular",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer

    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    # Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
[code_codestyle: 39]
def check_bouncy(n: int) -> bool:
    """Return True if n is a bouncy number (its digits are neither
    monotonically increasing nor monotonically decreasing)."""
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """Project Euler 112: find the least number at which the proportion
    of bouncy numbers reaches `percent`."""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1

    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(99)}")
[style_context_codestyle: 612 | label: 0]
def catalan(number: int) -> int:
    """Return the `number`-th Catalan number (1-based: catalan(1) == 1),
    using the recurrence C(i) = C(i-1) * (4i - 2) / (i + 1)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)

    current_number = 1

    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1

    return current_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
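A quick sanity check against the first few values of the sequence (1, 1, 2, 5, 14), computed from the recurrence above:

assert [catalan(n) for n in range(1, 6)] == [1, 1, 2, 5, 14]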
[code_codestyle: 710]
def merge_sort(collection: list) -> list:
    """Pure-Python merge sort; returns a new sorted list."""

    def merge(left: list, right: list) -> list:
        """Merge two sorted lists into a single sorted list."""

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
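A non-interactive usage example for completeness:

assert merge_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]
assert merge_sort([]) == []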
[style_context_codestyle: 520 | label: 0]
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
UpperCAmelCase = datasets.utils.logging.get_logger(__name__)
@dataclass
class __magic_name__ ( datasets.BuilderConfig ):
__A : int = 1_00_00
__A : Optional[List[str]] = None
__A : Optional[datasets.Features] = None
class __magic_name__ ( datasets.ArrowBasedBuilder ):
__A : Any = ParquetConfig
def __snake_case ( self : Any ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def __snake_case ( self : int , snake_case__ : Tuple ):
'''simple docstring'''
if not self.config.data_files:
raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
lowercase :Tuple = dl_manager.download_and_extract(self.config.data_files )
if isinstance(snake_case__ , (str, list, tuple) ):
lowercase :str = data_files
if isinstance(snake_case__ , snake_case__ ):
lowercase :Tuple = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
lowercase :Dict = [dl_manager.iter_files(snake_case__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
lowercase :str = []
for split_name, files in data_files.items():
if isinstance(snake_case__ , snake_case__ ):
lowercase :Dict = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
lowercase :List[str] = [dl_manager.iter_files(snake_case__ ) for file in files]
# Infer features is they are stoed in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(snake_case__ ):
with open(snake_case__ , '''rb''' ) as f:
lowercase :Optional[int] = datasets.Features.from_arrow_schema(pq.read_schema(snake_case__ ) )
break
splits.append(datasets.SplitGenerator(name=snake_case__ , gen_kwargs={'''files''': files} ) )
return splits
def __snake_case ( self : str , snake_case__ : pa.Table ):
'''simple docstring'''
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
lowercase :List[Any] = table_cast(snake_case__ , self.info.features.arrow_schema )
return pa_table
def __snake_case ( self : Any , snake_case__ : Optional[Any] ):
'''simple docstring'''
lowercase :List[Any] = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
f"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" )
for file_idx, file in enumerate(itertools.chain.from_iterable(snake_case__ ) ):
with open(snake_case__ , '''rb''' ) as f:
lowercase :int = pq.ParquetFile(snake_case__ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
lowercase :List[str] = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f"""{file_idx}_{batch_idx}""", self._cast_table(snake_case__ )
except ValueError as e:
logger.error(f"""Failed to read file '{file}' with error {type(snake_case__ )}: {e}""" )
raise
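For reference, this builder is what backs the packaged `parquet` loading script in the datasets library; a typical invocation looks like the sketch below, where the file paths are illustrative placeholders.

from datasets import load_dataset

# Hypothetical local files; the Parquet builder above does the actual reading.
ds = load_dataset("parquet", data_files={"train": "data/train.parquet"})
print(ds["train"].features)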
[code_codestyle: 677]
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase = {
'''configuration_blenderbot''': [
'''BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotConfig''',
'''BlenderbotOnnxConfig''',
],
'''tokenization_blenderbot''': ['''BlenderbotTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ['''BlenderbotTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotForCausalLM''',
'''BlenderbotForConditionalGeneration''',
'''BlenderbotModel''',
'''BlenderbotPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''TFBlenderbotForConditionalGeneration''',
'''TFBlenderbotModel''',
'''TFBlenderbotPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''FlaxBlenderbotForConditionalGeneration''',
'''FlaxBlenderbotModel''',
'''FlaxBlenderbotPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
[style_context_codestyle: 677 | label: 1]
class Node:
    """A binary search tree node."""

    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # Recursive traversal
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
[code_codestyle: 126]
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = """
Human: <<task>>

Assistant: """


DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}


def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """Download and cache the prompt from a repo, or return it as-is if it is a literal prompt."""
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
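A short usage sketch; the agent name is illustrative, and a first argument containing whitespace is treated as a literal prompt rather than a Hub repo ID.

# Fetches the default "run" template from huggingface-tools/default-prompts; needs network access.
template = download_prompt(None, agent_name="my-agent", mode="run")

# Strings containing whitespace are returned unchanged.
assert download_prompt("Do this: <<task>>", agent_name="my-agent") == "Do this: <<task>>"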
[style_context_codestyle: 126 | label: 1]
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
class UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = 1
@register_to_config
def __init__( self , lowerCamelCase = 20_00 , lowerCamelCase = 0.15 , lowerCamelCase = 0.01 , lowerCamelCase = 1348.0 , lowerCamelCase = 1e-5 , lowerCamelCase = 1 , ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase : Tuple = sigma_max
# setable values
UpperCamelCase : Tuple = None
self.set_sigmas(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase = None ) -> torch.FloatTensor:
'''simple docstring'''
return sample
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None ) -> Any:
'''simple docstring'''
UpperCamelCase : Union[str, Any] = sampling_eps if sampling_eps is not None else self.config.sampling_eps
UpperCamelCase : List[str] = torch.linspace(1 , lowerCamelCase , lowerCamelCase , device=lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase : List[Any] = sigma_min if sigma_min is not None else self.config.sigma_min
UpperCamelCase : Tuple = sigma_max if sigma_max is not None else self.config.sigma_max
UpperCamelCase : List[Any] = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(lowerCamelCase , lowerCamelCase )
UpperCamelCase : str = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
UpperCamelCase : str = torch.exp(torch.linspace(math.log(lowerCamelCase ) , math.log(lowerCamelCase ) , lowerCamelCase ) )
UpperCamelCase : int = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase ) -> str:
'''simple docstring'''
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = True , ) -> Union[SdeVeOutput, Tuple]:
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
UpperCamelCase : Union[str, Any] = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
UpperCamelCase : Union[str, Any] = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
UpperCamelCase : str = timesteps.to(self.discrete_sigmas.device )
UpperCamelCase : List[str] = self.discrete_sigmas[timesteps].to(sample.device )
UpperCamelCase : Union[str, Any] = self.get_adjacent_sigma(lowerCamelCase , lowerCamelCase ).to(sample.device )
UpperCamelCase : str = torch.zeros_like(lowerCamelCase )
UpperCamelCase : str = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
UpperCamelCase : Optional[Any] = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
UpperCamelCase : Optional[Any] = diffusion.unsqueeze(-1 )
UpperCamelCase : Union[str, Any] = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
UpperCamelCase : Union[str, Any] = randn_tensor(
sample.shape , layout=sample.layout , generator=lowerCamelCase , device=sample.device , dtype=sample.dtype )
UpperCamelCase : List[Any] = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
UpperCamelCase : List[str] = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=lowerCamelCase , prev_sample_mean=lowerCamelCase )
    def step_correct(self, model_output, sample, generator=None, return_dict=True, ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)
        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(self, original_samples, noise, timesteps, ) -> torch.FloatTensor:
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples
    def __len__(self) -> int:
        return self.config.num_train_timesteps
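# --- Illustrative usage sketch (added; `unet`, `sample`, and `num_inference_steps` are
# assumed names, not part of the original file). A predictor-corrector sampling loop with
# this scheduler typically looks like:
#
#     scheduler.set_timesteps(num_inference_steps)
#     scheduler.set_sigmas(num_inference_steps)
#     for i, t in enumerate(scheduler.timesteps):
#         sigma_t = scheduler.sigmas[i] * torch.ones(sample.shape[0], device=sample.device)
#         for _ in range(scheduler.config.correct_steps):      # corrector (eq. 6 term)
#             model_output = unet(sample, sigma_t).sample
#             sample = scheduler.step_correct(model_output, sample).prev_sample
#         model_output = unet(sample, sigma_t).sample          # predictor
#         output = scheduler.step_pred(model_output, t, sample)
#         sample, sample_mean = output.prev_sample, output.prev_sample_mean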
| 173 |
'''simple docstring'''
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard
def test_mockfs(mockfs):
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_non_mockfs():
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_extract_path_from_uri():
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path
def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False
@pytest.mark.parametrize("compression_fs_class" , A)
def A__ ( A : List[Any] , A : Any , A : str , A : Union[str, Any] , A : List[str] , A : List[Any] , A : Optional[Any]):
'''simple docstring'''
UpperCamelCase : List[str] = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file}
UpperCamelCase : Any = input_paths[compression_fs_class.protocol]
if input_path is None:
UpperCamelCase : Any = F'''for \'{compression_fs_class.protocol}\' compression protocol, '''
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(A)
UpperCamelCase : List[str] = fsspec.filesystem(compression_fs_class.protocol , fo=A)
assert isinstance(A , A)
UpperCamelCase : List[Any] = os.path.basename(A)
UpperCamelCase : Union[str, Any] = expected_filename[: expected_filename.rindex(".")]
assert fs.glob("*") == [expected_filename]
with fs.open(A , "r" , encoding="utf-8") as f, open(A , encoding="utf-8") as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol" , ["zip", "gzip"])
def A__ ( A : Optional[int] , A : str , A : Optional[Any]):
'''simple docstring'''
UpperCamelCase : Any = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
UpperCamelCase : str = compressed_file_paths[protocol]
UpperCamelCase : Optional[int] = "dataset.jsonl"
UpperCamelCase : Tuple = F'''{protocol}://{member_file_path}::{compressed_file_path}'''
UpperCamelCase , *UpperCamelCase : Dict = fsspec.get_fs_token_paths(A)
assert fs.isfile(A)
assert not fs.isfile("non_existing_" + member_file_path)
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()
def test_fs_overwrites():
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
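# Illustrative note (added): the chained URLs exercised above compose fsspec protocols,
# e.g. "gzip://dataset.jsonl::/path/to/data.jsonl.gz" means "the member dataset.jsonl
# inside the gzip archive at /path/to/data.jsonl.gz".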
| 173 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
},
"added_tokens.json": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
},
"merges_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
},
"tokenizer_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"RUCAIBox/mvp": 1_0_2_4,
}
class MvpTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs, ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs." )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs." )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
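# --- Illustrative usage sketch (added; downloading the checkpoint listed in the URL map
# above requires network access) ---
#
#     tokenizer = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
#     batch = tokenizer(["MVP is a supervised pre-trained model."], return_tensors="pt")
#     print(batch["input_ids"])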
| 227 |
"""simple docstring"""
def is_palindrome(num: int) -> bool:
    """Check whether `num` reads the same forwards and backwards."""
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
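# Illustrative checks (added; inputs chosen here for demonstration):
assert is_palindrome(121) is True
assert is_palindrome(-121) is False
assert is_palindrome(123) is False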
if __name__ == "__main__":
import doctest
doctest.testmod()
| 227 | 1 |
"""simple docstring"""
def factorial(digit: int) -> int:
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1))


def krishnamurthy(number: int) -> bool:
    """A Krishnamurthy number equals the sum of the factorials of its digits."""
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate, 10)
        fact_sum += factorial(digit)
    return fact_sum == number
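# Illustrative checks (added): 145 = 1! + 4! + 5!, so it is a Krishnamurthy number.
assert krishnamurthy(145) is True
assert krishnamurthy(240) is False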
if __name__ == "__main__":
    print("Program to check whether a number is a Krishnamurthy Number or not.")
    number = int(input("Enter number: ").strip())
print(
F'{number} is {"" if krishnamurthy(number) else "not "}a Krishnamurthy Number.'
)
| 572 |
"""simple docstring"""
def dodecahedron_surface_area(edge: float) -> float:
    if edge <= 0 or not isinstance(edge, (int, float)):
        raise ValueError("Length must be a positive number.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    if edge <= 0 or not isinstance(edge, (int, float)):
        raise ValueError("Length must be a positive number.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
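# Illustrative checks (added): for edge length 1 the closed forms give
# 3*sqrt(25 + 10*sqrt(5)) ~ 20.6457 (surface area) and (15 + 7*sqrt(5))/4 ~ 7.6631 (volume).
assert round(dodecahedron_surface_area(1), 4) == 20.6457
assert round(dodecahedron_volume(1), 4) == 7.6631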
if __name__ == "__main__":
import doctest
doctest.testmod()
| 572 | 1 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]
    num_words = len(words)
    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }
    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
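# Example (added): a fairseq dict file containing the lines "hello 1" and "world 1"
# yields {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hello": 4, "world": 5}.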
@torch.no_grad()
def convert_wav2vec2_checkpoint(checkpoint_path, pytorch_dump_folder_path, dict_path, encoder_config_path, decoder_config_path, vocab_size, num_decoder_layers, ):
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True, )
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])} )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)
    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"
    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
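# --- Illustrative invocation (added; the script name and paths are placeholders,
# not taken from the original) ---
# python convert_speech_to_text_wav2vec2.py \
#     --checkpoint_path /path/to/fairseq_checkpoint.pt \
#     --dict_path /path/to/dict.txt \
#     --pytorch_dump_folder_path ./converted_model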
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-large-lv60''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/s2t-small-mustc-en-fr-st''',
type=str,
help='''Path to hf decoder s2t checkpoint config''',
)
parser.add_argument('''--vocab_size''', default=10_224, type=int, help='''Vocab size of decoder''')
parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''')
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 713 |
"""simple docstring"""
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of an arithmetic series
    return total
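# Illustrative check (added): the arithmetic series 1 + 2 + ... + 10 sums to 55.
assert sum_of_series(1, 1, 10) == 55.0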
def main():
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 401 | 0 |
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny", "roberta", checkpoint_path, precision="fp32", device="cuda:0" if torch.cuda.is_available() else "cpu", enable_fusion=enable_fusion, fusion_type="aff_2d" if enable_fusion else None, )
    return model, model_cfg
def rename_state_dict(state_dict):
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        if "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
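# --- Illustrative invocation (added; the script name and paths are placeholders) ---
# python convert_clap_checkpoint.py \
#     --checkpoint_path /path/to/clap_checkpoint.pt \
#     --pytorch_dump_folder_path ./clap_converted \
#     --enable_fusion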
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 518 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2

LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)
def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ] )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:  # horizontal flip
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:  # vertical flip
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
def random_chars(number_char: int = 32) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("DONE ✅")
| 518 | 1 |
'''simple docstring'''
from math import pi, sqrt
def gamma(num: float) -> float:
    """Recursive gamma function for positive integers and half-integers."""
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)
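# Illustrative checks (added): gamma(n) = (n - 1)! for positive integers.
assert gamma(4) == 6.0
assert gamma(6) == 120.0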
def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        if num == 0:  # exit before gamma(0) would raise
            break
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
| 471 |
'''simple docstring'''
import math
class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
    print(graph.show_min(1, 4))  # shortest 1 -> 4 path goes 1 -> 3 -> 4, total weight 11
    print(graph.show_min(0, 3))  # shortest 0 -> 3 path goes 0 -> 2 -> 3, total weight 16
| 471 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }
        self.assertEqual(flatten_dict(input_dict), expected_dict)
    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))
    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))
    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))
    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
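# Illustrative note (added): `transpose`, `reshape`, `squeeze`, and `expand_dims` from
# transformers.utils dispatch on the input's framework, which is why each test compares
# the NumPy reference against the torch / tf / jax result converted back to NumPy.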
| 322 |
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    """Compute the fixed monthly payment on an amortizing loan."""
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")
    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
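# Illustrative check (added): a 25_000 loan at 12% annual interest over 3 years works
# out to a monthly installment of roughly 830.36.
assert round(equated_monthly_installments(25_000, 0.12, 3), 2) == 830.36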
if __name__ == "__main__":
import doctest
doctest.testmod()
| 322 | 1 |
def solution(n: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 below n."""
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result
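# Illustrative check (added): the sum of all multiples of 3 or 5 below 1000 is 233168.
assert solution(1000) == 233168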
if __name__ == "__main__":
print(F'''{solution() = }''')
| 224 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case :int =logging.get_logger(__name__)
__snake_case :Any ={
'google/switch-base-8': 'https://huggingface.co/google/switch-base-8/blob/main/config.json',
}
class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__(self, vocab_size=32_128, d_model=768, d_kv=64, d_ff=2_048, expert_capacity=64, num_layers=12, num_sparse_encoder_layers=3, num_decoder_layers=12, num_sparse_decoder_layers=3, num_heads=12, num_experts=8, router_bias=False, router_jitter_noise=0.0_1, router_dtype="float32", router_ignore_padding_tokens=False, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, router_z_loss_coef=0.0_0_1, router_aux_loss_coef=0.0_0_1, initializer_factor=1.0, feed_forward_proj="relu", is_encoder_decoder=True, add_router_probs=False, use_cache=True, pad_token_id=0, eos_token_id=1, **kwargs, ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff

        self.num_sparse_encoder_layers = num_sparse_encoder_layers

        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'" )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs, )
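# --- Illustrative usage sketch (added; not part of the original file) ---
#
#     config = SwitchTransformersConfig(num_experts=16, num_sparse_encoder_layers=4)
#     assert config.encoder_sparse_step == 12 // 4  # a sparse layer every 3rd layer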
| 224 | 1 |
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)

    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))
def clamp_rect(rect: [int], min: [int], max: [int]):
    return (
        clamp(rect[0], min[0], max[0]),
        clamp(rect[1], min[1], max[1]),
        clamp(rect[2], min[0], max[0]),
        clamp(rect[3], min[1], max[1]),
    )
def add_overlap_rect(rect: [int], overlap: int, image_size: [int]):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect
def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1]) ), (0, 0), )
    result.paste(tile, (original_slice, 0))
    return result
def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile
def next_divisible(n, d):
    divisor = n % d
    return n - divisor
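# Illustrative check (added): rounding 1030 down to the nearest multiple of 128 gives 1024.
assert next_divisible(1030, 128) == 1024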
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, low_res_scheduler: DDPMScheduler, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], max_noise_level: int = 350, ):
        super().__init__(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, max_noise_level=max_noise_level, )
    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders ), mode="L", )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask )
    @torch.no_grad()
    def __call__(self, prompt: Union[str, List[str]], image: Union[PIL.Image.Image, List[PIL.Image.Image]], num_inference_steps: int = 75, guidance_scale: float = 9.0, noise_level: int = 50, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, tile_size: int = 128, tile_border: int = 32, original_image_slice: int = 32, ):
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice, x, y, tile_size, tile_border, image, final_image, prompt=prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, noise_level=noise_level, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image
def main():
    # Run a demo
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")
if __name__ == "__main__":
main()
| 67 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(function: str, starting_point: complex, variable: str = "x", precision: float = 10**-10, multiplicity: int = 1, ) -> complex:
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess )
        else:
            raise ZeroDivisionError("Could not find root") from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}""")
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
f"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
f"""{newton_raphson("exp(x) - 1", 10, precision=0.005)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
| 17 | 0 |
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)


def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
class __snake_case ( snake_case__ ):
"""simple docstring"""
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]
    def __init__(self, config: CLIPConfig):
        super().__init__(config)
        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)
        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)
        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)
    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()
        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}
            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0
            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01
            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)
            result.append(result_img)
        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]
        return images, has_nsfw_concepts
    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
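

if __name__ == "__main__":
    # Illustrative smoke test (not from the source): randomly initialised weights only
    # demonstrate shapes and control flow; real use loads pretrained safety-checker
    # weights (e.g. via from_pretrained). All values below are assumptions.
    config = CLIPConfig()
    checker = StableDiffusionSafetyChecker(config)
    clip_input = torch.randn(1, 3, config.vision_config.image_size, config.vision_config.image_size)
    images = torch.randn(1, 3, 64, 64)
    _, has_nsfw_concepts = checker(clip_input, images)
    print(has_nsfw_concepts)  # e.g. [False] — scores are meaningless without trained embeddings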
| 683 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)
OPTS = None
def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout).")
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer.")
    parser.add_argument(
        "--na-prob-thresh", "-t", type=float, default=1.0, help="Predict \"\" if no-answer probability exceeds this (default = 1.0).", )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory.")
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
def normalize_answer(s):
    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_fa(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    fa = (2 * precision * recall) / (precision + recall)
    return fa
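# Worked illustration (not part of the official script): for gold "the cat sat" and
# prediction "cat sat down", normalize_answer drops the article "the", leaving gold
# tokens ["cat", "sat"]; precision = 2/3 and recall = 2/2, so
# compute_fa("the cat sat", "cat sat down") == 0.8.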
def get_raw_scores(dataset, preds):
    exact_scores = {}
    fa_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f'''Missing prediction for {qid}''')
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                fa_scores[qid] = max(compute_fa(a, a_pred) for a in gold_answers)
    return exact_scores, fa_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, fa_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(fa_scores.values()) / total),
                ("total", total),
            ])
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(fa_scores[k] for k in qid_list) / total),
                ("total", total),
            ])
def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_exact.png"), title="Precision-Recall curve for Exact Match score", )
    pr_fa = make_precision_recall_eval(
        fa_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_f1.png"), title="Precision-Recall curve for F1 score", )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_oracle.png"), title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)", )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_fa, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f'''Histogram of no-answer probability: {name}''')
    plt.savefig(os.path.join(image_dir, f'''na_prob_hist_{name}.png'''))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
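# Illustrative sanity check (not from the official script): one answerable question
# answered correctly with low no-answer probability, one unanswerable question
# predicted empty. The best threshold lands at q1's probability, accepting it:
# find_best_thresh({"q1": "ans", "q2": ""}, {"q1": 1.0, "q2": 1.0},
#                  {"q1": 0.1, "q2": 0.9}, {"q1": True, "q2": False})  # -> (100.0, 0.1)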
def find_all_best_thresh(main_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_fa, fa_thresh = find_best_thresh(preds, fa_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_fa
    main_eval["best_f1_thresh"] = fa_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, fa_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    fa_thresh = apply_no_ans_threshold(fa_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, fa_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('''Agg''')
import matplotlib.pyplot as plt
main()
| 683 | 1 |
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)

        raise ValueError(f'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 1_28
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 1_28,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32
        if self.block_type == "mid":
            init_dict.pop("out_channels")

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5E-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
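

# Illustrative concrete subclass (an assumed pattern mirroring diffusers' block tests;
# the DownBlock2D import and any expected slice values would come from the real suite):
# class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
#     block_class = DownBlock2D
#     block_type = "down"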
| 71 |
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a__ : str = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase =XGLMTokenizer
_lowerCamelCase =XGLMTokenizerFast
_lowerCamelCase =True
_lowerCamelCase =True
def __snake_case ( self : Optional[int] ):
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase = XGLMTokenizer(a__ , keep_accents=a__ )
tokenizer.save_pretrained(self.tmpdirname )
def __snake_case ( self : List[Any] ):
UpperCAmelCase = '''<pad>'''
UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a__ ) , a__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a__ ) , a__ )
def __snake_case ( self : Tuple ):
UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(len(a__ ) , 1008 )
def __snake_case ( self : List[Any] ):
self.assertEqual(self.get_tokenizer().vocab_size , 1008 )
def __snake_case ( self : Optional[Any] ):
UpperCAmelCase = XGLMTokenizer(a__ , keep_accents=a__ )
UpperCAmelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(a__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
a__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
UpperCAmelCase = tokenizer.convert_tokens_to_ids(a__ )
self.assertListEqual(
a__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
UpperCAmelCase = tokenizer.convert_ids_to_tokens(a__ )
self.assertListEqual(
a__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def __snake_case ( self : Optional[Any] ):
return XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
def __snake_case ( self : Optional[int] ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(a__ , f.name )
UpperCAmelCase = XGLMTokenizer(f.name , keep_accents=a__ )
UpperCAmelCase = pickle.dumps(a__ )
pickle.loads(a__ )
def __snake_case ( self : Tuple ):
if not self.test_rust_tokenizer:
return
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = self.get_rust_tokenizer()
UpperCAmelCase = '''I was born in 92000, and this is falsé.'''
UpperCAmelCase = tokenizer.tokenize(a__ )
UpperCAmelCase = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
UpperCAmelCase = tokenizer.encode(a__ , add_special_tokens=a__ )
UpperCAmelCase = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
UpperCAmelCase = self.get_rust_tokenizer()
UpperCAmelCase = tokenizer.encode(a__ )
UpperCAmelCase = rust_tokenizer.encode(a__ )
self.assertListEqual(a__ , a__ )
@slow
def __snake_case ( self : int ):
UpperCAmelCase = '''Hello World!'''
UpperCAmelCase = [2, 31227, 4447, 35]
self.assertListEqual(a__ , self.big_tokenizer.encode(a__ ) )
@slow
def __snake_case ( self : List[str] ):
UpperCAmelCase = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'''
)
# fmt: off
UpperCAmelCase = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(a__ , self.big_tokenizer.encode(a__ ) )
@slow
def __snake_case ( self : Any ):
# fmt: off
UpperCAmelCase = {
'''input_ids''': [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a__ , model_name='''facebook/xglm-564M''' , padding=a__ , )
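

# Hedged usage sketch (not part of the test file): the round trip these tests
# exercise, assuming access to the Hugging Face Hub.
# tok = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
# ids = tok.encode("Hello World!")  # [2, 31227, 4447, 35] per the slow test above
# print(tok.decode(ids, skip_special_tokens=True))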
| 51 | 0 |
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.3_1_4_4_5_9_8  # J / (mol * K)


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    if temperature < 0:
        raise Exception('Temperature cannot be less than 0 K')
    if molar_mass <= 0:
        raise Exception('Molar mass cannot be less than or equal to 0 kg/mol')
    # v_rms = sqrt(3 * R * T / M), with T in kelvin and M in kg/mol
    return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
    # example
    temperature = 3_0_0
    molar_mass = 0.028  # kg/mol for nitrogen (N2); the formula expects SI units
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f'Vrms of Nitrogen gas at 300 K is {vrms} m/s')
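    # Additional illustrative case (not in the original file): helium at 0.004 kg/mol
    # shows the 1/sqrt(molar_mass) dependence — roughly 2.6x faster than nitrogen.
    print(f'Vrms of Helium gas at 300 K is {rms_speed_of_molecule(300, 0.004)} m/s')  # ~1367.8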
| 406 |
"""simple docstring"""
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
snake_case = '▁'
snake_case = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class UpperCamelCase ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ : int = BertGenerationTokenizer
UpperCAmelCase_ : List[str] = False
UpperCAmelCase_ : str = True
def A ( self ) -> int:
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE = BertGenerationTokenizer(lowercase__ , keep_accents=lowercase__ )
tokenizer.save_pretrained(self.tmpdirname )
def A ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE = '<s>'
SCREAMING_SNAKE_CASE = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase__ ) , lowercase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase__ ) , lowercase__ )
def A ( self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , '<pad>' )
self.assertEqual(len(lowercase__ ) , 1002 )
def A ( self ) -> Any:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def A ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE = BertGenerationTokenizer(lowercase__ , keep_accents=lowercase__ )
SCREAMING_SNAKE_CASE = tokenizer.tokenize('This is a test' )
self.assertListEqual(lowercase__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase__ ) , [285, 46, 10, 170, 382] , )
SCREAMING_SNAKE_CASE = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowercase__ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(lowercase__ )
self.assertListEqual(
lowercase__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(lowercase__ )
self.assertListEqual(
lowercase__ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def A ( self ) -> Any:
"""simple docstring"""
return BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
@slow
def A ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'Hello World!'
SCREAMING_SNAKE_CASE = [18536, 2260, 101]
self.assertListEqual(lowercase__ , self.big_tokenizer.encode(lowercase__ ) )
@slow
def A ( self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
SCREAMING_SNAKE_CASE = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
34324,
497,
391,
408,
11342,
1244,
385,
100,
938,
985,
456,
574,
362,
12597,
3200,
3129,
1172,
]
self.assertListEqual(lowercase__ , self.big_tokenizer.encode(lowercase__ ) )
@require_torch
@slow
def A ( self ) -> int:
"""simple docstring"""
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
SCREAMING_SNAKE_CASE = list(self.big_tokenizer.get_vocab().keys() )[:10]
SCREAMING_SNAKE_CASE = ' '.join(lowercase__ )
SCREAMING_SNAKE_CASE = self.big_tokenizer.encode_plus(lowercase__ , return_tensors='pt' , return_token_type_ids=lowercase__ )
SCREAMING_SNAKE_CASE = self.big_tokenizer.batch_encode_plus(
[sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=lowercase__ )
SCREAMING_SNAKE_CASE = BertGenerationConfig()
SCREAMING_SNAKE_CASE = BertGenerationEncoder(lowercase__ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowercase__ )
model(**lowercase__ )
@slow
def A ( self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE = {'input_ids': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase__ , model_name='google/bert_for_seq_generation_L-24_bbc_encoder' , revision='c817d1fd1be2ffa69431227a1fe320544943d4db' , )
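

# Hedged usage sketch (not part of the test file): the encoder round trip the slow
# tests above exercise, assuming Hub access.
# tok = BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
# assert tok.encode("Hello World!") == [18536, 2260, 101]  # matches the slow test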
| 406 | 1 |
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    'tab': ord('\t'),
    'newline': ord('\r'),
    'esc': 2_7,
    'up': 6_5 + ARROW_KEY_FLAG,
    'down': 6_6 + ARROW_KEY_FLAG,
    'right': 6_7 + ARROW_KEY_FLAG,
    'left': 6_8 + ARROW_KEY_FLAG,
    'mod_int': 9_1,
    'undefined': sys.maxsize,
    'interrupt': 3,
    'insert': 5_0,
    'delete': 5_1,
    'pg_up': 5_3,
    'pg_down': 5_4,
}

KEYMAP['arrow_begin'] = KEYMAP['up']
KEYMAP['arrow_end'] = KEYMAP['left']

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b'\xe0H': KEYMAP['up'] - ARROW_KEY_FLAG,
        b'\x00H': KEYMAP['up'] - ARROW_KEY_FLAG,
        b'\xe0P': KEYMAP['down'] - ARROW_KEY_FLAG,
        b'\x00P': KEYMAP['down'] - ARROW_KEY_FLAG,
        b'\xe0M': KEYMAP['right'] - ARROW_KEY_FLAG,
        b'\x00M': KEYMAP['right'] - ARROW_KEY_FLAG,
        b'\xe0K': KEYMAP['left'] - ARROW_KEY_FLAG,
        b'\x00K': KEYMAP['left'] - ARROW_KEY_FLAG,
    }

for i in range(1_0):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    "Gets raw characters from inputs"
    if os.name == "nt":
        import msvcrt

        encoding = 'mbcs'
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP['mod_int']))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP['esc'])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    "Gets a character from the keyboard and returns the key code"
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
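

# Hedged usage sketch (not part of the module): a minimal key-reading loop. Arrow
# keys come back as single characters with ARROW_KEY_FLAG folded into their code,
# so they compare directly against KEYMAP["up"], KEYMAP["down"], etc.
# while True:
#     key = get_character()
#     if key == KEYMAP["undefined"]:
#         continue
#     if isinstance(key, str) and ord(key) == KEYMAP["up"]:
#         print("up arrow")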
| 9 |
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1_777, height: int = 1_855, digits: int = 8) -> int:
    # iterated modular exponentiation: keep only the last `digits` digits each step
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
if __name__ == "__main__":
print(f'{solution() = }')
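    # Worked illustration (not in the original file): solution() iterates modular
    # exponentiation to get the last `digits` digits of the tetration base^^height
    # (Project Euler 188). A hand-checkable case: 3^^2 = 3**3 = 27.
    print(solution(base=3, height=2, digits=8))  # 27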
| 9 | 1 |
def is_automorphic_number(number: int) -> bool:
    if not isinstance(number, int):
        msg = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    # an automorphic number's square ends in the number itself, digit by digit
    while number > 0:
        if number % 1_0 != number_square % 1_0:
            return False
        number //= 1_0
        number_square //= 1_0
    return True
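

# Quick illustrative checks (not in the original file): 5**2 = 25 and 76**2 = 5776
# both end in the original number, while 7**2 = 49 does not.
# print(is_automorphic_number(5), is_automorphic_number(76), is_automorphic_number(7))
# -> True True False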
if __name__ == "__main__":
import doctest
doctest.testmod()
| 577 |
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    print('Generating primitive root of p')
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print('Generating prime p...')
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)

    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f'''{name}_pubkey.txt''') or os.path.exists(f'''{name}_privkey.txt'''):
        print('\nWARNING:')
        print(
            f'''"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'''
            'Use a different name or delete these files and re-run this program.')
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f'''\nWriting public key to file {name}_pubkey.txt...''')
    with open(f'''{name}_pubkey.txt''', 'w') as fo:
        fo.write(f'''{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}''')

    print(f'''Writing private key to file {name}_privkey.txt...''')
    with open(f'''{name}_privkey.txt''', 'w') as fo:
        fo.write(f'''{private_key[0]},{private_key[1]}''')


def main() -> None:
    print('Making key files...')
    make_key_files('elgamal', 2_0_4_8)
    print('Key files generation successful')
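    # Illustrative consistency check (not in the original file): the generated pair
    # satisfies e_2 == inverse of (e_1**d mod p) mod p, which is what lets ElGamal
    # decryption undo encryption. With a small key size for speed:
    # public_key, private_key = generate_key(64)
    # (key_size, e_1, e_2, p), (_, d) = public_key, private_key
    # assert (e_2 * pow(e_1, d, p)) % p == 1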
| 577 | 1 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1_024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1_536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1_024, 2_048]
        auxiliary_in_channels = 1_024

    # set label information
    num_labels = 150
    repo_id = '''huggingface/label-files'''
    filename = '''ade20k-id2label.json'''
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset'''), '''r'''))
    idalabel = {int(k): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''])
    config = UperNetConfig(
        backbone_config=backbone_config, auxiliary_in_channels=auxiliary_in_channels, num_labels=num_labels, id2label=idalabel, label2id=labelaid, )

    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('''backbone.downsample_layers.0.0.weight''', '''backbone.embeddings.patch_embeddings.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.0.bias''', '''backbone.embeddings.patch_embeddings.bias''') )
rename_keys.append(('''backbone.downsample_layers.0.1.weight''', '''backbone.embeddings.layernorm.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.1.bias''', '''backbone.embeddings.layernorm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.stages.{i}.{j}.gamma''', F'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.norm.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.norm.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((F'''backbone.downsample_layers.{i}.0.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.0.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.1.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.1.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        '''upernet-convnext-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth''',
        '''upernet-convnext-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth''',
        '''upernet-convnext-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth''',
        '''upernet-convnext-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth''',
        '''upernet-convnext-xlarge''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth''',
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='''cpu''')['''state_dict''']

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace('''bn''', '''batch_norm''')
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)

    # verify on image
    url = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'''
    image = Image.open(requests.get(url, stream=True).raw).convert('''RGB''')

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors='''pt''').pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)

    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]])
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]])
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]])
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]])
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]])
    print('''Logits:''', outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1E-4)
    print('''Looks ok!''')

    if pytorch_dump_folder_path is not None:
        print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''')
        model.save_pretrained(pytorch_dump_folder_path)
        print(f'''Saving processor to {pytorch_dump_folder_path}''')
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f'''Pushing model and processor for {model_name} to hub''')
        model.push_to_hub(f'''openmmlab/{model_name}''')
        processor.push_to_hub(f'''openmmlab/{model_name}''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-convnext-tiny',
type=str,
choices=[f"""upernet-convnext-{size}""" for size in ['tiny', 'small', 'base', 'large', 'xlarge']],
help='Name of the ConvNext UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
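    # Illustrative CLI invocation (the script filename here is hypothetical; the flags
    # match the argparse definition above):
    # python convert_upernet_convnext_to_pytorch.py --model_name upernet-convnext-tiny \
    #     --pytorch_dump_folder_path ./upernet-convnext-tiny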
| 49 |
def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog", ) -> bool:
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog", ) -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)


def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog", ) -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
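    # Illustrative negative case (not in the original file): listing the letters a
    # non-pangram is missing, with the same set logic as is_pangram_fastest:
    # import string
    # missing = set(string.ascii_lowercase) - {c for c in "hello world" if c.isalpha()}
    # print(sorted(missing))  # every letter except d, e, h, l, o, r, w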
| 204 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
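

# Illustrative note (not part of the module): _LazyModule defers the heavy imports,
# so e.g.
#   from transformers.models.speech_to_text import Speech2TextConfig
# resolves through _import_structure and only pays the torch/TF import cost when the
# corresponding symbols are actually accessed.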
| 479 |
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """The coefficients are ordered from the constant term up to the degree-th term."""
        if len(coefficients) != degree + 1:
            raise ValueError(
                '''The number of coefficients should be equal to the degree + 1.''')

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_a: Polynomial) -> Polynomial:
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree, coefficients)

    def __sub__(self, polynomial_a: Polynomial) -> Polynomial:
        return self + polynomial_a * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_a: Polynomial) -> Polynomial:
        coefficients = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ''''''
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_a: object) -> bool:
        if not isinstance(polynomial_a, Polynomial):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_a: object) -> bool:
        return not self.__eq__(polynomial_a)
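

if __name__ == "__main__":
    # Quick illustrative usage (not in the original file): coefficients run from the
    # constant term upward, so [1, 2, 3] encodes 3x^2 + 2x + 1.
    p = Polynomial(2, [1, 2, 3])
    print(p.evaluate(2))  # 17
    print(p.derivative())  # 6x + 2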
| 479 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class lowerCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase =RoCBertTokenizer
_lowerCamelCase =None
_lowerCamelCase =False
_lowerCamelCase =True
_lowerCamelCase =filter_non_english
def __snake_case ( self : Any ):
super().setUp()
UpperCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''你''', '''好''', '''是''', '''谁''', '''a''', '''b''', '''c''', '''d''']
UpperCAmelCase = {}
UpperCAmelCase = {}
for i, value in enumerate(a__ ):
UpperCAmelCase = i
UpperCAmelCase = i
UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_shape_file'''] )
UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_pronunciation_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.word_shape_file , '''w''' , encoding='''utf-8''' ) as word_shape_writer:
json.dump(a__ , a__ , ensure_ascii=a__ )
with open(self.word_pronunciation_file , '''w''' , encoding='''utf-8''' ) as word_pronunciation_writer:
json.dump(a__ , a__ , ensure_ascii=a__ )
def __snake_case ( self : List[Any] ):
UpperCAmelCase = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
UpperCAmelCase = tokenizer.tokenize('''你好[SEP]你是谁''' )
self.assertListEqual(a__ , ['''你''', '''好''', '''[SEP]''', '''你''', '''是''', '''谁'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(a__ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(a__ ) , [5, 6, 2, 5, 7, 8] )
def __snake_case ( self : Optional[int] ):
UpperCAmelCase = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def __snake_case ( self : Optional[int] ):
UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=a__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __snake_case ( self : Tuple ):
UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=a__ , strip_accents=a__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def __snake_case ( self : Optional[int] ):
UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=a__ , strip_accents=a__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __snake_case ( self : Tuple ):
UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=a__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __snake_case ( self : int ):
UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=a__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __snake_case ( self : Optional[Any] ):
UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=a__ , strip_accents=a__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __snake_case ( self : Optional[Any] ):
UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=a__ , strip_accents=a__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __snake_case ( self : Optional[int] ):
UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=a__ , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def __snake_case ( self : Optional[int] ):
UpperCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
UpperCAmelCase = {}
for i, token in enumerate(a__ ):
UpperCAmelCase = i
UpperCAmelCase = RoCBertWordpieceTokenizer(vocab=a__ , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def __snake_case ( self : int ):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def __snake_case ( self : Union[str, Any] ):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def __snake_case ( self : Any ):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def __snake_case ( self : Optional[int] ):
UpperCAmelCase = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
if self.test_rust_tokenizer:
UpperCAmelCase = self.get_rust_tokenizer()
self.assertListEqual(
                [rust_tokenizer.tokenize(t ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
def __snake_case ( self : int ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(a__ , **a__ )
UpperCAmelCase = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
UpperCAmelCase = tokenizer_r.encode_plus(
a__ , return_attention_mask=a__ , return_token_type_ids=a__ , return_offsets_mapping=a__ , add_special_tokens=a__ , )
UpperCAmelCase = tokenizer_r.do_lower_case if hasattr(a__ , '''do_lower_case''' ) else False
UpperCAmelCase = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def __snake_case ( self : List[str] ):
UpperCAmelCase = ['''的''', '''人''', '''有''']
UpperCAmelCase = ''''''.join(a__ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCAmelCase = True
UpperCAmelCase = self.tokenizer_class.from_pretrained(a__ , **a__ )
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(a__ , **a__ )
UpperCAmelCase = tokenizer_p.encode(a__ , add_special_tokens=a__ )
UpperCAmelCase = tokenizer_r.encode(a__ , add_special_tokens=a__ )
UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(a__ )
UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(a__ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(a__ , a__ )
self.assertListEqual(a__ , a__ )
UpperCAmelCase = False
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(a__ , **a__ )
UpperCAmelCase = self.tokenizer_class.from_pretrained(a__ , **a__ )
UpperCAmelCase = tokenizer_r.encode(a__ , add_special_tokens=a__ )
UpperCAmelCase = tokenizer_p.encode(a__ , add_special_tokens=a__ )
UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(a__ )
UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(a__ )
# it is expected that only the first Chinese character is not preceded by "##".
UpperCAmelCase = [
f"##{token}" if idx != 0 else token for idx, token in enumerate(a__ )
]
self.assertListEqual(a__ , a__ )
self.assertListEqual(a__ , a__ )
@slow
def __snake_case ( self : str ):
UpperCAmelCase = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
UpperCAmelCase = tokenizer.encode('''你好''' , add_special_tokens=a__ )
UpperCAmelCase = tokenizer.encode('''你是谁''' , add_special_tokens=a__ )
UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(a__ )
UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(a__ , a__ )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def __snake_case ( self : str ):
UpperCAmelCase = self.get_tokenizers(do_lower_case=a__ )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
UpperCAmelCase = '''你好,你是谁'''
UpperCAmelCase = tokenizer.tokenize(a__ )
UpperCAmelCase = tokenizer.convert_tokens_to_ids(a__ )
UpperCAmelCase = tokenizer.convert_tokens_to_shape_ids(a__ )
UpperCAmelCase = tokenizer.convert_tokens_to_pronunciation_ids(a__ )
UpperCAmelCase = tokenizer.prepare_for_model(
a__ , a__ , a__ , add_special_tokens=a__ )
UpperCAmelCase = tokenizer.encode_plus(a__ , add_special_tokens=a__ )
self.assertEqual(a__ , a__ )
| 51 |
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
__snake_case : List[Any] = get_tests_dir('fixtures/test_sentencepiece.model')
__snake_case : Union[str, Any] = {'target_lang': 'fi', 'source_lang': 'en'}
__snake_case : Union[str, Any] = '>>zh<<'
__snake_case : List[str] = 'Helsinki-NLP/'
if is_torch_available():
__snake_case : Optional[int] = 'pt'
elif is_tf_available():
__snake_case : List[Any] = 'tf'
else:
__snake_case : Union[str, Any] = 'jax'
@require_sentencepiece
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = MarianTokenizer
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = True
def _SCREAMING_SNAKE_CASE ( self: int) -> Tuple:
"""simple docstring"""
super().setUp()
__lowerCAmelCase : Dict = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
__lowerCAmelCase : Union[str, Any] = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE))))
__lowerCAmelCase : int = Path(self.tmpdirname)
save_json(_SCREAMING_SNAKE_CASE , save_dir / VOCAB_FILES_NAMES["vocab"])
save_json(_SCREAMING_SNAKE_CASE , save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(_SCREAMING_SNAKE_CASE , save_dir / VOCAB_FILES_NAMES["source_spm"])
copyfile(_SCREAMING_SNAKE_CASE , save_dir / VOCAB_FILES_NAMES["target_spm"])
__lowerCAmelCase : Tuple = MarianTokenizer.from_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname)
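        # setUp materialises a tiny Marian tokenizer on disk: the vocab and the
        # tokenizer config are saved as JSON, and the SentencePiece fixture is
        # copied in as both the source and target .spm model before the
        # tokenizer is reloaded with from_pretrained and re-saved.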
def _SCREAMING_SNAKE_CASE ( self: Dict , **_SCREAMING_SNAKE_CASE: List[str]) -> MarianTokenizer:
"""simple docstring"""
return MarianTokenizer.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: List[Any] , _SCREAMING_SNAKE_CASE: List[str]) -> Dict:
"""simple docstring"""
return (
"This is a test",
"This is a test",
)
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> int:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = "</s>"
__lowerCAmelCase : int = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_SCREAMING_SNAKE_CASE) , _SCREAMING_SNAKE_CASE)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_SCREAMING_SNAKE_CASE) , _SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : List[str] = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , "</s>")
self.assertEqual(vocab_keys[1] , "<unk>")
self.assertEqual(vocab_keys[-1] , "<pad>")
self.assertEqual(len(_SCREAMING_SNAKE_CASE) , 9)
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> Dict:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 9)
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> Any:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = MarianTokenizer.from_pretrained(F"""{ORG_NAME}opus-mt-en-de""")
__lowerCAmelCase : int = en_de_tokenizer(["I am a small frog"] , return_tensors=_SCREAMING_SNAKE_CASE)
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[int] = [38, 121, 14, 697, 3_8848, 0]
self.assertListEqual(_SCREAMING_SNAKE_CASE , batch.input_ids[0])
__lowerCAmelCase : Tuple = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = [x.name for x in Path(_SCREAMING_SNAKE_CASE).glob("*")]
self.assertIn("source.spm" , _SCREAMING_SNAKE_CASE)
MarianTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : List[str] = self.get_tokenizer()
__lowerCAmelCase : int = tok(
["I am a small frog" * 1000, "I am a small frog"] , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE)
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
self.assertEqual(batch.input_ids.shape , (2, 512))
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Any = self.get_tokenizer()
__lowerCAmelCase : str = tok(["I am a tiny frog", "I am a small frog"] , padding=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE)
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
self.assertEqual(batch_smaller.input_ids.shape , (2, 10))
@slow
def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = {"input_ids": [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_SCREAMING_SNAKE_CASE , model_name="Helsinki-NLP/opus-mt-en-de" , revision="1a8c2263da11e68e50938f97e10cd57820bd504c" , decode_kwargs={"use_source_tokenizer": True} , )
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> Any:
"""simple docstring"""
__lowerCAmelCase : List[Any] = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")
__lowerCAmelCase : List[str] = "Tämä on testi"
__lowerCAmelCase : int = "This is a test"
__lowerCAmelCase : Union[str, Any] = [76, 7, 2047, 2]
__lowerCAmelCase : Dict = [69, 12, 11, 940, 2]
__lowerCAmelCase : List[str] = tokenizer(_SCREAMING_SNAKE_CASE).input_ids
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = tokenizer(text_target=_SCREAMING_SNAKE_CASE).input_ids
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = tokenizer.decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE)
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
| 293 | 0 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
UpperCAmelCase = """true"""
def __lowerCAmelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=82 , SCREAMING_SNAKE_CASE=16 )-> Tuple:
"""simple docstring"""
set_seed(42 )
snake_case_ = RegressionModel()
snake_case_ = deepcopy(SCREAMING_SNAKE_CASE )
snake_case_ = RegressionDataset(length=SCREAMING_SNAKE_CASE )
snake_case_ = DataLoader(SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE )
model.to(accelerator.device )
snake_case_ , snake_case_ = accelerator.prepare(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return model, ddp_model, dataloader
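# The helper above returns both the plain model and its accelerator-prepared
# (distributed) copy built from the same weights (hence the deepcopy before
# prepare()), so callers can compare single-process and distributed results.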
def __lowerCAmelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False )-> Any:
"""simple docstring"""
snake_case_ = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' )
snake_case_ = load_dataset('''glue''' , '''mrpc''' , split='''validation''' )
def tokenize_function(SCREAMING_SNAKE_CASE ):
snake_case_ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE )
return outputs
with accelerator.main_process_first():
snake_case_ = dataset.map(
SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
snake_case_ = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(SCREAMING_SNAKE_CASE ):
if use_longest:
return tokenizer.pad(SCREAMING_SNAKE_CASE , padding='''longest''' , return_tensors='''pt''' )
return tokenizer.pad(SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return DataLoader(SCREAMING_SNAKE_CASE , shuffle=SCREAMING_SNAKE_CASE , collate_fn=SCREAMING_SNAKE_CASE , batch_size=16 )
def __lowerCAmelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )-> Optional[Any]:
"""simple docstring"""
snake_case_ = Accelerator(dispatch_batches=SCREAMING_SNAKE_CASE , split_batches=SCREAMING_SNAKE_CASE )
snake_case_ = get_dataloader(SCREAMING_SNAKE_CASE , not dispatch_batches )
snake_case_ = AutoModelForSequenceClassification.from_pretrained(
'''hf-internal-testing/mrpc-bert-base-cased''' , return_dict=SCREAMING_SNAKE_CASE )
snake_case_ , snake_case_ = accelerator.prepare(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def __lowerCAmelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )-> List[Any]:
"""simple docstring"""
snake_case_ = []
for batch in dataloader:
snake_case_ , snake_case_ = batch.values()
with torch.no_grad():
snake_case_ = model(SCREAMING_SNAKE_CASE )
snake_case_ , snake_case_ = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
snake_case_ , snake_case_ = [], []
for logit, targ in logits_and_targets:
logits.append(SCREAMING_SNAKE_CASE )
targs.append(SCREAMING_SNAKE_CASE )
snake_case_ , snake_case_ = torch.cat(SCREAMING_SNAKE_CASE ), torch.cat(SCREAMING_SNAKE_CASE )
return logits, targs
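# generate_predictions above relies on accelerator.gather_for_metrics, which
# gathers tensors from all processes and drops the samples duplicated to pad
# the final batch, so the concatenated logits/targets line up exactly with the
# dataset length asserted by the caller.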
def __lowerCAmelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=82 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=16 )-> Dict:
"""simple docstring"""
snake_case_ , snake_case_ , snake_case_ = get_basic_setup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
snake_case_ , snake_case_ = generate_predictions(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert (
len(SCREAMING_SNAKE_CASE ) == num_samples
), f'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(SCREAMING_SNAKE_CASE )}'''
def __lowerCAmelCase (SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = False )-> Optional[int]:
"""simple docstring"""
snake_case_ = evaluate.load('''glue''' , '''mrpc''' )
snake_case_ , snake_case_ = get_mrpc_setup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# First do baseline
snake_case_ , snake_case_ , snake_case_ = setup['''no''']
model.to(SCREAMING_SNAKE_CASE )
model.eval()
for batch in dataloader:
batch.to(SCREAMING_SNAKE_CASE )
with torch.inference_mode():
snake_case_ = model(**SCREAMING_SNAKE_CASE )
snake_case_ = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=SCREAMING_SNAKE_CASE , references=batch['''labels'''] )
snake_case_ = metric.compute()
# Then do distributed
snake_case_ , snake_case_ , snake_case_ = setup['''ddp''']
model.eval()
for batch in dataloader:
with torch.inference_mode():
snake_case_ = model(**SCREAMING_SNAKE_CASE )
snake_case_ = outputs.logits.argmax(dim=-1 )
snake_case_ = batch['''labels''']
snake_case_ , snake_case_ = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=SCREAMING_SNAKE_CASE , references=SCREAMING_SNAKE_CASE )
snake_case_ = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), f'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def __lowerCAmelCase ()-> Union[str, Any]:
"""simple docstring"""
snake_case_ = Accelerator(split_batches=SCREAMING_SNAKE_CASE , dispatch_batches=SCREAMING_SNAKE_CASE )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('''**Testing gather_for_metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
test_mrpc(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test torch metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
snake_case_ = Accelerator(split_batches=SCREAMING_SNAKE_CASE , dispatch_batches=SCREAMING_SNAKE_CASE )
if accelerator.is_local_main_process:
print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
test_torch_metrics(SCREAMING_SNAKE_CASE , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test last batch is not dropped when perfectly divisible**''' )
snake_case_ = Accelerator()
test_torch_metrics(SCREAMING_SNAKE_CASE , 512 )
accelerator.state._reset_state()
def __lowerCAmelCase (SCREAMING_SNAKE_CASE )-> Dict:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 715 |
import copy
import re
class lowerCAmelCase_ :
'''simple docstring'''
__snake_case = "hp"
__snake_case = {}
__snake_case = None
@classmethod
def UpperCamelCase__ ( cls , _UpperCAmelCase , _UpperCAmelCase ):
snake_case_ = prefix
snake_case_ = defaults
cls.build_naming_info()
@staticmethod
def UpperCamelCase__ ( _UpperCAmelCase , _UpperCAmelCase ):
if len(_UpperCAmelCase ) == 0:
return ""
snake_case_ = None
if any(char.isdigit() for char in word ):
raise Exception(F'''Parameters should not contain numbers: \'{word}\' contains a number''' )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(_UpperCAmelCase ) + 1 ):
snake_case_ = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
snake_case_ = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(_UpperCAmelCase ):
snake_case_ = ''''''
while integer != 0:
snake_case_ = chr(ord('''A''' ) + integer % 10 ) + s
integer //= 10
return s
snake_case_ = 0
while True:
snake_case_ = word + '''#''' + int_to_alphabetic(_UpperCAmelCase )
if sword in info["reverse_short_word"]:
continue
else:
snake_case_ = sword
break
snake_case_ = short_word
snake_case_ = word
return short_word
@staticmethod
def UpperCamelCase__ ( _UpperCAmelCase , _UpperCAmelCase ):
snake_case_ = param_name.split('''_''' )
snake_case_ = [TrialShortNamer.shortname_for_word(_UpperCAmelCase , _UpperCAmelCase ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
snake_case_ = ['''''', '''_''']
for separator in separators:
snake_case_ = separator.join(_UpperCAmelCase )
if shortname not in info["reverse_short_param"]:
snake_case_ = shortname
snake_case_ = param_name
return shortname
return param_name
@staticmethod
def UpperCamelCase__ ( _UpperCAmelCase , _UpperCAmelCase ):
snake_case_ = TrialShortNamer.shortname_for_key(_UpperCAmelCase , _UpperCAmelCase )
snake_case_ = short_name
snake_case_ = param_name
@classmethod
def UpperCamelCase__ ( cls ):
if cls.NAMING_INFO is not None:
return
snake_case_ = {
'''short_word''': {},
'''reverse_short_word''': {},
'''short_param''': {},
'''reverse_short_param''': {},
}
snake_case_ = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(_UpperCAmelCase , _UpperCAmelCase )
snake_case_ = info
@classmethod
def UpperCamelCase__ ( cls , _UpperCAmelCase ):
cls.build_naming_info()
assert cls.PREFIX is not None
snake_case_ = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(F'''You should provide a default value for the param name {k} with value {v}''' )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
snake_case_ = cls.NAMING_INFO['''short_param'''][k]
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
snake_case_ = 1 if v else 0
snake_case_ = '''''' if isinstance(_UpperCAmelCase , (int, float) ) else '''-'''
snake_case_ = F'''{key}{sep}{v}'''
name.append(_UpperCAmelCase )
return "_".join(_UpperCAmelCase )
@classmethod
def UpperCamelCase__ ( cls , _UpperCAmelCase ):
snake_case_ = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
snake_case_ = []
else:
snake_case_ = repr.split('''_''' )
snake_case_ = {}
for value in values:
if "-" in value:
snake_case_ , snake_case_ = value.split('''-''' )
else:
snake_case_ = re.sub('''[0-9.]''' , '''''' , _UpperCAmelCase )
snake_case_ = float(re.sub('''[^0-9.]''' , '''''' , _UpperCAmelCase ) )
snake_case_ = cls.NAMING_INFO['''reverse_short_param'''][p_k]
snake_case_ = p_v
for k in cls.DEFAULTS:
if k not in parameters:
snake_case_ = cls.DEFAULTS[k]
return parameters
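# Usage sketch (hedged): in upstream Transformers this helper is TrialShortNamer
# with PREFIX / DEFAULTS / NAMING_INFO class attributes. Against that API one
# would write, for example:
#
#     class RunNamer(TrialShortNamer):
#         PREFIX = "run"
#         DEFAULTS = {"learning_rate": 1e-3, "seed": 42}
#
#     name = RunNamer.shortname({"learning_rate": 3e-4, "seed": 42})
#     # parameters equal to DEFAULTS are left out of the name, and
#     # RunNamer.parse_repr(name) inverts the encoding.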
| 531 | 0 |
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
lowerCamelCase__ : List[Any] = pytest.mark.integration
lowerCamelCase__ : Dict = {"""comet"""}
lowerCamelCase__ : Optional[int] = importlib.util.find_spec("""fairseq""") is not None
lowerCamelCase__ : Tuple = {"""code_eval"""}
lowerCamelCase__ : str = os.name == """nt"""
lowerCamelCase__ : int = {"""bertscore""", """frugalscore""", """perplexity"""}
lowerCamelCase__ : Tuple = importlib.util.find_spec("""transformers""") is not None
def UpperCamelCase ( lowercase_ ) -> Optional[Any]:
'''simple docstring'''
@wraps(lowercase_ )
def wrapper(self , lowercase_ ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"' )
else:
test_case(self , lowercase_ )
return wrapper
def UpperCamelCase ( lowercase_ ) -> Any:
'''simple docstring'''
@wraps(lowercase_ )
def wrapper(self , lowercase_ ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"' )
else:
test_case(self , lowercase_ )
return wrapper
def UpperCamelCase ( lowercase_ ) -> Any:
'''simple docstring'''
@wraps(lowercase_ )
def wrapper(self , lowercase_ ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"' )
else:
test_case(self , lowercase_ )
return wrapper
def UpperCamelCase ( ) -> List[str]:
'''simple docstring'''
lowercase__ : Union[str, Any] = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("""./metrics/*/""" )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
@local
class _snake_case ( parameterized.TestCase ):
__lowerCAmelCase : Any = {}
__lowerCAmelCase : str = None
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""")
@pytest.mark.filterwarnings("""ignore:load_metric is deprecated:FutureWarning""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : int = """[...]"""
lowercase__ : Optional[Any] = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("""metrics""" , SCREAMING_SNAKE_CASE_)).module_path)
lowercase__ : List[str] = datasets.load.import_main_class(metric_module.__name__ , dataset=SCREAMING_SNAKE_CASE_)
# check parameters
lowercase__ : Any = inspect.signature(metric._compute).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values())) # no **kwargs
# run doctest
with self.patch_intensive_calls(SCREAMING_SNAKE_CASE_ , metric_module.__name__):
with self.use_local_metrics():
try:
lowercase__ : int = doctest.testmod(SCREAMING_SNAKE_CASE_ , verbose=SCREAMING_SNAKE_CASE_ , raise_on_error=SCREAMING_SNAKE_CASE_)
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0)
self.assertGreater(results.attempted , 1)
@slow
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Optional[int] = """[...]"""
lowercase__ : Union[str, Any] = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("""metrics""" , SCREAMING_SNAKE_CASE_)).module_path)
# run doctest
with self.use_local_metrics():
lowercase__ : Union[str, Any] = doctest.testmod(SCREAMING_SNAKE_CASE_ , verbose=SCREAMING_SNAKE_CASE_ , raise_on_error=SCREAMING_SNAKE_CASE_)
self.assertEqual(results.failed , 0)
self.assertGreater(results.attempted , 1)
@contextmanager
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](SCREAMING_SNAKE_CASE_):
yield
else:
yield
@contextmanager
def lowercase__ ( self):
'''simple docstring'''
def load_local_metric(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
return load_metric(os.path.join("""metrics""" , SCREAMING_SNAKE_CASE_) , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
with patch("""datasets.load_metric""") as mock_load_metric:
lowercase__ : Union[str, Any] = load_local_metric
yield
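        # The context manager above patches datasets.load_metric so that every
        # metric requested inside a doctest resolves to the local ./metrics
        # folder instead of being fetched from the Hub.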
@classmethod
def lowercase__ ( cls , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
def wrapper(SCREAMING_SNAKE_CASE_):
lowercase__ : Any = contextmanager(SCREAMING_SNAKE_CASE_)
lowercase__ : Union[str, Any] = patcher
return patcher
return wrapper
@LocalMetricTest.register_intensive_calls_patcher("""bleurt""" )
def UpperCamelCase ( lowercase_ ) -> Optional[int]:
'''simple docstring'''
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string("""sv""" , """""" , """""" ) # handle pytest cli flags
class _snake_case ( UpperCAmelCase_ ):
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
assert len(input_dict["""input_ids"""]) == 2
return np.array([1.0_3, 1.0_4])
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch("""bleurt.score._create_predictor""" ) as mock_create_predictor:
lowercase__ : str = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher("""bertscore""" )
def UpperCamelCase ( lowercase_ ) -> int:
'''simple docstring'''
import torch
def bert_cos_score_idf(lowercase_ , lowercase_ , *lowercase_ , **lowercase_ ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(lowercase_ ) )
# mock get_model which is supposed to do download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch("""bert_score.scorer.get_model""" ), patch(
"""bert_score.scorer.bert_cos_score_idf""" ) as mock_bert_cos_score_idf:
lowercase__ : int = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher("""comet""" )
def UpperCamelCase ( lowercase_ ) -> Dict:
'''simple docstring'''
def load_from_checkpoint(lowercase_ ):
class _snake_case :
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
assert len(SCREAMING_SNAKE_CASE_) == 2
lowercase__ : Optional[Any] = [0.1_9, 0.9_2]
return scores, sum(SCREAMING_SNAKE_CASE_) / len(SCREAMING_SNAKE_CASE_)
return Model()
# mock load_from_checkpoint which is supposed to do download a bert model
# mock load_from_checkpoint which is supposed to do download a bert model
with patch("""comet.download_model""" ) as mock_download_model:
lowercase__ : Union[str, Any] = None
with patch("""comet.load_from_checkpoint""" ) as mock_load_from_checkpoint:
lowercase__ : List[Any] = load_from_checkpoint
yield
def UpperCamelCase ( ) -> List[Any]:
'''simple docstring'''
lowercase__ : Optional[Any] = load_metric(os.path.join("""metrics""" , """seqeval""" ) )
lowercase__ : int = """ERROR"""
lowercase__ : Tuple = F'Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}'
with pytest.raises(lowercase_ , match=re.escape(lowercase_ ) ):
metric.compute(predictions=[] , references=[] , scheme=lowercase_ )
| 12 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class _snake_case ( UpperCAmelCase_ ):
__lowerCAmelCase : BigBirdConfig
__lowerCAmelCase : jnp.dtype = jnp.floataa
__lowerCAmelCase : bool = True
def lowercase__ ( self):
'''simple docstring'''
super().setup()
lowercase__ : Dict = nn.Dense(5 , dtype=self.dtype)
def __call__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : List[str] = super().__call__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
lowercase__ : List[str] = self.cls(outputs[2])
return outputs[:2] + (cls_out,)
class _snake_case ( UpperCAmelCase_ ):
__lowerCAmelCase : Optional[int] = FlaxBigBirdForNaturalQuestionsModule
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> int:
'''simple docstring'''
def cross_entropy(lowercase_ , lowercase_ , lowercase_=None ):
lowercase__ : int = logits.shape[-1]
lowercase__ : List[str] = (labels[..., None] == jnp.arange(lowercase_ )[None]).astype("""f4""" )
lowercase__ : int = jax.nn.log_softmax(lowercase_ , axis=-1 )
lowercase__ : Any = -jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
lowercase__ : Optional[int] = reduction(lowercase_ )
return loss
lowercase__ : int = partial(lowercase_ , reduction=jnp.mean )
lowercase__ : Tuple = cross_entropy(lowercase_ , lowercase_ )
lowercase__ : List[Any] = cross_entropy(lowercase_ , lowercase_ )
lowercase__ : Union[str, Any] = cross_entropy(lowercase_ , lowercase_ )
return (start_loss + end_loss + pooled_loss) / 3
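# The natural-questions head predicts a start logit, an end logit and a pooled
# 5-way answer category; the loss above is the plain average of the three
# cross-entropies, (start_loss + end_loss + pooled_loss) / 3.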
@dataclass
class _snake_case :
__lowerCAmelCase : str = "google/bigbird-roberta-base"
__lowerCAmelCase : int = 3_000
__lowerCAmelCase : int = 10_500
__lowerCAmelCase : int = 128
__lowerCAmelCase : int = 3
__lowerCAmelCase : int = 1
__lowerCAmelCase : int = 5
# tx_args
__lowerCAmelCase : float = 3e-5
__lowerCAmelCase : float = 0.0
__lowerCAmelCase : int = 20_000
__lowerCAmelCase : float = 0.0_095
__lowerCAmelCase : str = "bigbird-roberta-natural-questions"
__lowerCAmelCase : str = "training-expt"
__lowerCAmelCase : str = "data/nq-training.jsonl"
__lowerCAmelCase : str = "data/nq-validation.jsonl"
def lowercase__ ( self):
'''simple docstring'''
os.makedirs(self.base_dir , exist_ok=SCREAMING_SNAKE_CASE_)
lowercase__ : Any = os.path.join(self.base_dir , self.save_dir)
lowercase__ : str = self.batch_size_per_device * jax.device_count()
@dataclass
class _snake_case :
__lowerCAmelCase : int
__lowerCAmelCase : int = 4_096 # no dynamic padding on TPUs
def __call__( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Dict = self.collate_fn(SCREAMING_SNAKE_CASE_)
lowercase__ : List[Any] = jax.tree_util.tree_map(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
return batch
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ , lowercase__ : str = self.fetch_inputs(features["""input_ids"""])
lowercase__ : str = {
"""input_ids""": jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.intaa),
"""attention_mask""": jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.intaa),
"""start_labels""": jnp.array(features["""start_token"""] , dtype=jnp.intaa),
"""end_labels""": jnp.array(features["""end_token"""] , dtype=jnp.intaa),
"""pooled_labels""": jnp.array(features["""category"""] , dtype=jnp.intaa),
}
return batch
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : List[Any] = [self._fetch_inputs(SCREAMING_SNAKE_CASE_) for ids in input_ids]
return zip(*SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Tuple = [1 for _ in range(len(SCREAMING_SNAKE_CASE_))]
while len(SCREAMING_SNAKE_CASE_) < self.max_length:
input_ids.append(self.pad_id)
attention_mask.append(0)
return input_ids, attention_mask
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_=None ) -> Optional[Any]:
'''simple docstring'''
if seed is not None:
lowercase__ : Any = dataset.shuffle(seed=lowercase_ )
for i in range(len(lowercase_ ) // batch_size ):
lowercase__ : List[str] = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(lowercase_ )
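# Note: the batching generator above yields len(dataset) // batch_size full
# batches and silently drops any trailing partial batch, keeping shapes static
# for the pmapped step functions below.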
@partial(jax.pmap , axis_name="""batch""" )
def UpperCamelCase ( lowercase_ , lowercase_ , **lowercase_ ) -> int:
'''simple docstring'''
def loss_fn(lowercase_ ):
lowercase__ : Dict = model_inputs.pop("""start_labels""" )
lowercase__ : List[Any] = model_inputs.pop("""end_labels""" )
lowercase__ : List[Any] = model_inputs.pop("""pooled_labels""" )
lowercase__ : List[Any] = state.apply_fn(**lowercase_ , params=lowercase_ , dropout_rng=lowercase_ , train=lowercase_ )
lowercase__ , lowercase__ , lowercase__ : Any = outputs
return state.loss_fn(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , )
lowercase__ , lowercase__ : Optional[int] = jax.random.split(lowercase_ )
lowercase__ : Tuple = jax.value_and_grad(lowercase_ )
lowercase__ , lowercase__ : Optional[int] = grad_fn(state.params )
lowercase__ : Tuple = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" )
lowercase__ : Any = jax.lax.pmean(lowercase_ , """batch""" )
lowercase__ : str = state.apply_gradients(grads=lowercase_ )
return state, metrics, new_drp_rng
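# One pmapped training step: compute loss and gradients per device, average
# both across devices with lax.pmean over the "batch" axis, then apply the
# synchronised gradients and return a fresh dropout rng.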
@partial(jax.pmap , axis_name="""batch""" )
def UpperCamelCase ( lowercase_ , **lowercase_ ) -> str:
'''simple docstring'''
lowercase__ : Tuple = model_inputs.pop("""start_labels""" )
lowercase__ : List[str] = model_inputs.pop("""end_labels""" )
lowercase__ : int = model_inputs.pop("""pooled_labels""" )
lowercase__ : List[Any] = state.apply_fn(**lowercase_ , params=state.params , train=lowercase_ )
lowercase__ , lowercase__ , lowercase__ : Optional[int] = outputs
lowercase__ : Optional[Any] = state.loss_fn(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
lowercase__ : List[str] = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" )
return metrics
class _snake_case ( train_state.TrainState ):
__lowerCAmelCase : Callable = struct.field(pytree_node=UpperCAmelCase_ )
@dataclass
class _snake_case :
__lowerCAmelCase : Args
__lowerCAmelCase : Callable
__lowerCAmelCase : Callable
__lowerCAmelCase : Callable
__lowerCAmelCase : Callable
__lowerCAmelCase : wandb
__lowerCAmelCase : Callable = None
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None):
'''simple docstring'''
lowercase__ : List[str] = model.params
lowercase__ : Dict = TrainState.create(
apply_fn=model.__call__ , params=SCREAMING_SNAKE_CASE_ , tx=SCREAMING_SNAKE_CASE_ , loss_fn=SCREAMING_SNAKE_CASE_ , )
if ckpt_dir is not None:
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = restore_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ : str = {
"""lr""": args.lr,
"""init_lr""": args.init_lr,
"""warmup_steps""": args.warmup_steps,
"""num_train_steps""": num_train_steps,
"""weight_decay""": args.weight_decay,
}
lowercase__ , lowercase__ : Any = build_tx(**SCREAMING_SNAKE_CASE_)
lowercase__ : List[str] = train_state.TrainState(
step=SCREAMING_SNAKE_CASE_ , apply_fn=model.__call__ , params=SCREAMING_SNAKE_CASE_ , tx=SCREAMING_SNAKE_CASE_ , opt_state=SCREAMING_SNAKE_CASE_ , )
lowercase__ : Optional[Any] = args
lowercase__ : Union[str, Any] = data_collator
lowercase__ : str = lr
lowercase__ : Union[str, Any] = params
lowercase__ : Dict = jax_utils.replicate(SCREAMING_SNAKE_CASE_)
return state
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Tuple = self.args
lowercase__ : List[str] = len(SCREAMING_SNAKE_CASE_) // args.batch_size
lowercase__ : int = jax.random.PRNGKey(0)
lowercase__ : Union[str, Any] = jax.random.split(SCREAMING_SNAKE_CASE_ , jax.device_count())
for epoch in range(args.max_epochs):
lowercase__ : Tuple = jnp.array(0 , dtype=jnp.floataa)
lowercase__ : List[str] = get_batched_dataset(SCREAMING_SNAKE_CASE_ , args.batch_size , seed=SCREAMING_SNAKE_CASE_)
lowercase__ : List[str] = 0
for batch in tqdm(SCREAMING_SNAKE_CASE_ , total=SCREAMING_SNAKE_CASE_ , desc=f'Running EPOCH-{epoch}'):
lowercase__ : Tuple = self.data_collator(SCREAMING_SNAKE_CASE_)
lowercase__ , lowercase__ , lowercase__ : List[Any] = self.train_step_fn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
running_loss += jax_utils.unreplicate(metrics["""loss"""])
i += 1
if i % args.logging_steps == 0:
lowercase__ : List[str] = jax_utils.unreplicate(state.step)
lowercase__ : str = running_loss.item() / i
lowercase__ : Tuple = self.scheduler_fn(state_step - 1)
lowercase__ : Tuple = self.evaluate(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ : List[Any] = {
"""step""": state_step.item(),
"""eval_loss""": eval_loss.item(),
"""tr_loss""": tr_loss,
"""lr""": lr.item(),
}
tqdm.write(str(SCREAMING_SNAKE_CASE_))
self.logger.log(SCREAMING_SNAKE_CASE_ , commit=SCREAMING_SNAKE_CASE_)
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + f'-e{epoch}-s{i}' , state=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Dict = get_batched_dataset(SCREAMING_SNAKE_CASE_ , self.args.batch_size)
lowercase__ : Tuple = len(SCREAMING_SNAKE_CASE_) // self.args.batch_size
lowercase__ : Union[str, Any] = jnp.array(0 , dtype=jnp.floataa)
lowercase__ : Optional[Any] = 0
for batch in tqdm(SCREAMING_SNAKE_CASE_ , total=SCREAMING_SNAKE_CASE_ , desc="""Evaluating ... """):
lowercase__ : Tuple = self.data_collator(SCREAMING_SNAKE_CASE_)
lowercase__ : List[Any] = self.val_step_fn(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
running_loss += jax_utils.unreplicate(metrics["""loss"""])
i += 1
return running_loss / i
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Tuple = jax_utils.unreplicate(SCREAMING_SNAKE_CASE_)
print(f'SAVING CHECKPOINT IN {save_dir}' , end=""" ... """)
self.model_save_fn(SCREAMING_SNAKE_CASE_ , params=state.params)
with open(os.path.join(SCREAMING_SNAKE_CASE_ , """opt_state.msgpack""") , """wb""") as f:
f.write(to_bytes(state.opt_state))
joblib.dump(self.args , os.path.join(SCREAMING_SNAKE_CASE_ , """args.joblib"""))
joblib.dump(self.data_collator , os.path.join(SCREAMING_SNAKE_CASE_ , """data_collator.joblib"""))
with open(os.path.join(SCREAMING_SNAKE_CASE_ , """training_state.json""") , """w""") as f:
json.dump({"""step""": state.step.item()} , SCREAMING_SNAKE_CASE_)
print("""DONE""")
def UpperCamelCase ( lowercase_ , lowercase_ ) -> Optional[Any]:
'''simple docstring'''
print(F'RESTORING CHECKPOINT FROM {save_dir}' , end=""" ... """ )
with open(os.path.join(lowercase_ , """flax_model.msgpack""" ) , """rb""" ) as f:
lowercase__ : Optional[Any] = from_bytes(state.params , f.read() )
with open(os.path.join(lowercase_ , """opt_state.msgpack""" ) , """rb""" ) as f:
lowercase__ : Dict = from_bytes(state.opt_state , f.read() )
lowercase__ : Any = joblib.load(os.path.join(lowercase_ , """args.joblib""" ) )
lowercase__ : Optional[int] = joblib.load(os.path.join(lowercase_ , """data_collator.joblib""" ) )
with open(os.path.join(lowercase_ , """training_state.json""" ) , """r""" ) as f:
lowercase__ : int = json.load(lowercase_ )
lowercase__ : Optional[Any] = training_state["""step"""]
print("""DONE""" )
return params, opt_state, step, args, data_collator
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Tuple:
'''simple docstring'''
lowercase__ : Optional[int] = num_train_steps - warmup_steps
lowercase__ : int = optax.linear_schedule(init_value=lowercase_ , end_value=lowercase_ , transition_steps=lowercase_ )
lowercase__ : Optional[int] = optax.linear_schedule(init_value=lowercase_ , end_value=1E-7 , transition_steps=lowercase_ )
lowercase__ : Any = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
return lr
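# Learning-rate schedule above: linear warmup from init_lr to lr over
# warmup_steps, then linear decay towards ~0 (1e-7) for the remaining steps,
# stitched together with optax.join_schedules at the warmup boundary.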
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Optional[int]:
'''simple docstring'''
def weight_decay_mask(lowercase_ ):
lowercase__ : Dict = traverse_util.flatten_dict(lowercase_ )
        lowercase__ : int = {k: (k[-1] != """bias""" and k[-2:] != ("""LayerNorm""", """scale""")) for k, v in params.items()}
return traverse_util.unflatten_dict(lowercase_ )
lowercase__ : Optional[int] = scheduler_fn(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
lowercase__ : int = optax.adamw(learning_rate=lowercase_ , weight_decay=lowercase_ , mask=lowercase_ )
return tx, lr
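# build_tx above pairs that schedule with AdamW whose weight decay is masked
# so that biases and LayerNorm scale parameters are excluded from decay.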
| 12 | 1 |
from collections import namedtuple
lowerCamelCase : List[str] = namedtuple('from_to', 'from_ to')
lowerCamelCase : Any = {
"cubicmeter": from_to(1, 1),
"litre": from_to(0.001, 1_0_0_0),
"kilolitre": from_to(1, 1),
"gallon": from_to(0.0_0454, 2_6_4.1_7_2),
"cubicyard": from_to(0.7_6455, 1.3_0795),
"cubicfoot": from_to(0.028, 3_5.3_1_4_7),
"cup": from_to(0.0_0023_6588, 4_2_2_6.7_5),
}
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> float:
if from_type not in METRIC_CONVERSION:
raise ValueError(
f"""Invalid 'from_type' value: {from_type!r} Supported values are:\n"""
+ """, """.join(a_ ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
f"""Invalid 'to_type' value: {to_type!r}. Supported values are:\n"""
+ """, """.join(a_ ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
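# Sanity-check sketch (hedged: assumes the conversion table defined above is
# bound to the METRIC_CONVERSION name the function body expects): converting
# 1 litre to gallons goes litre -> cubic metre -> gallon, i.e.
# 1 * 0.001 * 264.172 ~= 0.2642 US gallons.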
if __name__ == "__main__":
import doctest
doctest.testmod()
| 718 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 684 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowercase_ : Optional[int] = {'configuration_glpn': ['GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GLPNConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Any = ['GLPNFeatureExtractor']
lowercase_ : str = ['GLPNImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Union[str, Any] = [
'GLPN_PRETRAINED_MODEL_ARCHIVE_LIST',
'GLPNForDepthEstimation',
'GLPNLayer',
'GLPNModel',
'GLPNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
lowercase_ : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 64 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase ( _UpperCAmelCase , unittest.TestCase ):
lowerCAmelCase : Union[str, Any] = KandinskyVaaPriorPipeline
lowerCAmelCase : Dict = ["""prompt"""]
lowerCAmelCase : List[Any] = ["""prompt""", """negative_prompt"""]
lowerCAmelCase : Optional[int] = [
"""num_images_per_prompt""",
"""generator""",
"""num_inference_steps""",
"""latents""",
"""negative_prompt""",
"""guidance_scale""",
"""output_type""",
"""return_dict""",
]
lowerCAmelCase : Dict = False
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return self.time_input_dim
@property
def __A ( self ):
return self.time_input_dim * 4
@property
def __A ( self ):
return 100
@property
def __A ( self ):
A__ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def __A ( self ):
torch.manual_seed(0 )
A__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(UpperCAmelCase__ )
@property
def __A ( self ):
torch.manual_seed(0 )
A__ = {
"num_attention_heads": 2,
"attention_head_dim": 12,
"embedding_dim": self.text_embedder_hidden_size,
"num_layers": 1,
}
A__ = PriorTransformer(**UpperCAmelCase__ )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
A__ = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def __A ( self ):
torch.manual_seed(0 )
A__ = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
A__ = CLIPVisionModelWithProjection(UpperCAmelCase__ )
return model
@property
def __A ( self ):
A__ = CLIPImageProcessor(
crop_size=224 , do_center_crop=UpperCAmelCase__ , do_normalize=UpperCAmelCase__ , do_resize=UpperCAmelCase__ , image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073] , image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711] , resample=3 , size=224 , )
return image_processor
def __A ( self ):
A__ = self.dummy_prior
A__ = self.dummy_image_encoder
A__ = self.dummy_text_encoder
A__ = self.dummy_tokenizer
A__ = self.dummy_image_processor
A__ = UnCLIPScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_000 , clip_sample=UpperCAmelCase__ , clip_sample_range=10.0 , )
A__ = {
"prior": prior,
"image_encoder": image_encoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"scheduler": scheduler,
"image_processor": image_processor,
}
return components
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__=0 ):
if str(UpperCAmelCase__ ).startswith("mps" ):
A__ = torch.manual_seed(UpperCAmelCase__ )
else:
A__ = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
A__ = {
"prompt": "horse",
"generator": generator,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def __A ( self ):
A__ = "cpu"
A__ = self.get_dummy_components()
A__ = self.pipeline_class(**UpperCAmelCase__ )
A__ = pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
A__ = pipe(**self.get_dummy_inputs(UpperCAmelCase__ ) )
A__ = output.image_embeds
A__ = pipe(
**self.get_dummy_inputs(UpperCAmelCase__ ) , return_dict=UpperCAmelCase__ , )[0]
A__ = image[0, -10:]
A__ = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
A__ = np.array(
[-0.0_532, 1.7_120, 0.3_656, -1.0_852, -0.8_946, -1.1_756, 0.4_348, 0.2_482, 0.5_146, -0.1_156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def __A ( self ):
A__ = torch_device == "cpu"
A__ = True
A__ = False
self._test_inference_batch_single_identical(
test_max_difference=UpperCAmelCase__ , relax_max_difference=UpperCAmelCase__ , test_mean_pixel_difference=UpperCAmelCase__ , )
@skip_mps
def __A ( self ):
A__ = torch_device == "cpu"
A__ = False
self._test_attention_slicing_forward_pass(
test_max_difference=UpperCAmelCase__ , test_mean_pixel_difference=UpperCAmelCase__ , )
| 491 | 0 |
"""simple docstring"""
from __future__ import annotations
def _lowercase ( __snake_case ,__snake_case ) -> list[list[int]]:
__lowerCAmelCase : list[list[int]] = []
create_all_state(1 ,__snake_case ,__snake_case ,[] ,__snake_case )
return result
def _lowercase ( __snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,) -> None:
if level == 0:
total_list.append(current_list[:] )
return
for i in range(__snake_case ,total_number - level + 2 ):
current_list.append(__snake_case )
create_all_state(i + 1 ,__snake_case ,level - 1 ,__snake_case ,__snake_case )
current_list.pop()
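# Classic backtracking above: pick candidate i, recurse with one fewer slot
# (level - 1), then pop to undo the choice. The bound total_number - level + 2
# prunes branches that cannot possibly fill the remaining slots.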
def _lowercase ( __snake_case ) -> None:
for i in total_list:
print(*__snake_case )
if __name__ == "__main__":
__snake_case : int = 4
__snake_case : Union[str, Any] = 2
__snake_case : int = generate_all_combinations(n, k)
print_all_state(total_list)
| 615 |
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class A__ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : List[Any] = 0
__lowerCAmelCase : str = [0]
__lowerCAmelCase : Union[str, Any] = [0]
__lowerCAmelCase : List[Any] = len(_SCREAMING_SNAKE_CASE)
self.assertEqual(k.knapsack(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE) , 0)
__lowerCAmelCase : Dict = [60]
__lowerCAmelCase : Optional[int] = [10]
__lowerCAmelCase : Tuple = len(_SCREAMING_SNAKE_CASE)
self.assertEqual(k.knapsack(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE) , 0)
    def test_easy_case(self) -> None:
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)
    def test_knapsack(self) -> None:
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
if __name__ == "__main__":
unittest.main()
| 615 | 1 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401
COMPRESSION_FILESYSTEMS = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path
def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False
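# Illustrative behaviour of the two helpers above (added, not part of the
# original module):
#   extract_path_from_uri("s3://my-bucket/data/train.csv") -> "my-bucket/data/train.csv"
#   extract_path_from_uri("relative/path") -> "relative/path"
#   is_remote_filesystem(fsspec.filesystem("file")) -> False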
def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)
def _reset_fsspec_lock() -> None:
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
| 295 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 9 | 0 |
'''simple docstring'''
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    """Greedy fractional knapsack: sort by value/weight ratio, then fill to capacity w."""
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
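# Worked example (added for illustration): vl = [60, 100, 120], wt = [10, 20, 30],
# w = 50, n = 3. Sorted by value/weight ratio (6, 5, 4) the order is unchanged;
# acc = [10, 30, 60], so k = bisect(acc, 50) = 2: take items 0 and 1 whole and
# 20/30 of item 2, giving 60 + 100 + 120 * 20 / 30 = 240.0.
# frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) -> 240.0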
| 271 |
'''simple docstring'''
import argparse
import datetime
def zeller(date_input: str) -> str:
    """Zeller's congruence: find the day of the week for a given mm-dd-yyyy date."""
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?"
        )

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser(
description=(
'Find out what day of the week nearly any date is or was. Enter '
'date as a string in the mm-dd-yyyy or mm/dd/yyyy format'
)
)
parser.add_argument(
'date_input', type=str, help='Date as a string (mm-dd-yyyy or mm/dd/yyyy)'
)
    args = parser.parse_args()
zeller(args.date_input)
| 271 | 1 |
'''simple docstring'''
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    # adding nodes
    def push(self, new_data: Any):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    # swapping nodes (only the payloads are exchanged, the links stay in place)
    def swap_nodes(self, node_data_1, node_data_2):
        if node_data_1 == node_data_2:
            return
        else:
            node_1 = self.head
            while node_1 is not None and node_1.data != node_data_1:
                node_1 = node_1.next

            node_2 = self.head
            while node_2 is not None and node_2.data != node_data_2:
                node_2 = node_2.next

            if node_1 is None or node_2 is None:
                return

            node_1.data, node_2.data = node_2.data, node_1.data
if __name__ == "__main__":
    ll = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print("""After swapping""")
ll.print_list()
| 244 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_distilbert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_distilbert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_distilbert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_distilbert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_distilbert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_distilbert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFDistilBertModel,
'''fill-mask''': TFDistilBertForMaskedLM,
'''question-answering''': TFDistilBertForQuestionAnswering,
'''text-classification''': TFDistilBertForSequenceClassification,
'''token-classification''': TFDistilBertForTokenClassification,
'''zero-shot''': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 601 | 0 |
import math
def is_prime(number: int) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
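# Quick sanity checks (added): is_prime(2) -> True, is_prime(15) -> False,
# is_prime(97) -> True, is_prime(1) -> False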
def solution(ratio: float = 0.1) -> int:
    """Return the side length of the square spiral at which the ratio of primes
    along both diagonals first falls below `ratio`."""
    j = 3
    primes = 3

    while primes / (2 * j - 1) >= ratio:
        # the three non-square corners of the next ring of the spiral
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
| 114 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''naver-clova-ix/donut-base''': '''https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json''',
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class DonutSwinConfig(PretrainedConfig):
    model_type = "donut-swin"

    attribute_map = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, **kwargs):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
| 114 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 46 |
def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term containing n digits."""
    fib_1, fib_2 = 1, 1
    index = 2
    while True:
        i = 0
        fib = fib_1 + fib_2
        fib_1, fib_2 = fib_2, fib
        index += 1
        for _ in str(fib):
            i += 1
        if i == n:
            break
    return index
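# Illustrative check (added): solution(3) -> 12, since 144 is the 12th
# Fibonacci term and the first one with three digits.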
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 666 | 0 |
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """Normality = molarity * n-factor, where molarity = moles / volume."""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for pressure: P = nRT / V (R = 0.0821 L*atm/mol*K)."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for volume: V = nRT / P."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Ideal gas law solved for temperature: T = PV / nR."""
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
import doctest
doctest.testmod()
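# Illustrative calls (added; values mirror the classic doctests for these
# conversions):
#   molarity_to_normality(2, 4, 8) -> 1
#   moles_to_pressure(0.82, 3, 300) -> 90
#   moles_to_volume(0.82, 3, 300) -> 90
#   pressure_and_volume_to_temperature(0.82, 1, 2) -> 20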
| 429 |
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    """Compute (a ** n) % mod in O(log n) multiplications."""
    if n == 0:
        return 1

    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod

    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod
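# Quick check (added): binary_exponentiation(2, 10, 10**9 + 7) -> 1024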
# a prime number
p = 701

a = 1_000_000_000
b = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
| 429 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(self, vocab_size=30000, embedding_size=128, hidden_size=4096, num_hidden_layers=12, num_hidden_groups=1, num_attention_heads=64, intermediate_size=16384, inner_group_num=1, hidden_act="gelu_new", hidden_dropout_prob=0, attention_probs_dropout_prob=0, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, classifier_dropout_prob=0.1, position_embedding_type="absolute", pad_token_id=0, bos_token_id=2, eos_token_id=3, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
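# Minimal usage sketch (added; relies on the class names restored above):
# config = AlbertConfig()  # defaults roughly match albert-xxlarge-v2
# onnx_config = AlbertOnnxConfig.from_model_config(config)
# list(onnx_config.inputs) -> ["input_ids", "attention_mask", "token_type_ids"]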
| 41 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInpaintPipelineFastTests(PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True, )
        scheduler = PNDMScheduler(skip_prk_steps=True)
torch.manual_seed(0 )
        vae = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='''gelu''' , projection_dim=512 , )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png" )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy" )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np", )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3
    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png" )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy" )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, safety_checker=None, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np", )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png" )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pndm = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, safety_checker=None, scheduler=pndm, torch_dtype=torch.float16, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, num_inference_steps=2, output_type="np", )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
| 75 | 0 |
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    # Backtracking over unused elements; prints each complete permutation.
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False
sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_2)
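# Note (added): the two calls above print all 4! = 24 permutations of
# [3, 1, 2, 4] followed by the 3! = 6 permutations of ["A", "B", "C"].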
| 246 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase_ : Dict = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}." )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message="Add model", use_temp_dir=True, )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message="Add image processor", use_temp_dir=True, )

        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_config = {
'resnet18': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ),
'resnet26': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ),
'resnet34': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ),
'resnet50': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ),
'resnet101': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ),
'resnet152': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ),
}
    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported resnet* architecture,"""
""" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
    args = parser.parse_args()

    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 246 | 1 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        return ViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )

    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_graph_mode_with_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])

        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
| 400 |
'''simple docstring'''
def logical_left_shift(number: int, shift_amount: int) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number
def logical_right_shift(number: int, shift_amount: int) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number
def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
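# Illustrative outputs (added), matching the string formats returned above:
#   logical_left_shift(11, 2)      -> "0b101100"
#   logical_right_shift(11, 2)     -> "0b10"
#   arithmetic_right_shift(-8, 2)  -> "0b11110"  (sign bit is replicated)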
| 400 | 1 |
'''simple docstring'''
def price_plus_tax(price: float, tax_rate: float) -> float:
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(f'{price_plus_tax(100, 0.25) = }')
print(f'{price_plus_tax(125.50, 0.05) = }')
| 700 |
'''simple docstring'''
def valid_connection(
    graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]
) -> bool:
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False

    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)
def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False
def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    # Initialize path with -1, indicating that we have not visited them yet
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
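# Illustrative graph (added): this 5-vertex adjacency matrix contains the
# Hamiltonian cycle 0 -> 1 -> 2 -> 4 -> 3 -> 0, which the search above finds.
# graph = [
#     [0, 1, 0, 1, 0],
#     [1, 0, 1, 1, 1],
#     [0, 1, 0, 0, 1],
#     [1, 1, 0, 0, 1],
#     [0, 1, 1, 1, 0],
# ]
# hamilton_cycle(graph) -> [0, 1, 2, 4, 3, 0]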
| 417 | 0 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
"""artists_file""": """artists.json""",
"""lyrics_file""": """lyrics.json""",
"""genres_file""": """genres.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""artists_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""",
},
"""genres_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""",
},
"""lyrics_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""",
},
}
PRETRAINED_LYRIC_TOKENS_SIZES = {
"""jukebox""": 5_1_2,
}
class JukeboxTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, artists_file, genres_file, lyrics_file, version=["v3", "v2", "v2"], max_n_lyric_tokens=512, n_genres=5, unk_token="<|endoftext|>", **kwargs):
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            unk_token=unk_token, n_genres=n_genres, version=version, max_n_lyric_tokens=max_n_lyric_tokens, **kwargs, )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres

        with open(artists_file, encoding="utf-8") as vocab_handle:
            self.artists_encoder = json.load(vocab_handle)

        with open(genres_file, encoding="utf-8") as vocab_handle:
            self.genres_encoder = json.load(vocab_handle)

        with open(lyrics_file, encoding="utf-8") as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle)

        oov = r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder) == 79:
            oov = oov.replace(r"\-'", r"\-+'")

        self.out_of_vocab = regex.compile(oov)
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}
    @property
    def vocab_size(self):
        return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)

    def get_vocab(self):
        return dict(self.artists_encoder, **self.genres_encoder, **self.lyrics_encoder)
    def _convert_token_to_id(self, list_artists, list_genres, list_lyrics):
        artists_id = [self.artists_encoder.get(artist, 0) for artist in list_artists]
        for genres in range(len(list_genres)):
            list_genres[genres] = [self.genres_encoder.get(genre, 0) for genre in list_genres[genres]]
            list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))

        lyric_ids = [[self.lyrics_encoder.get(character, 0) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids
    def _tokenize(self, lyrics):
        # lyrics are tokenized character by character
        return list(lyrics)
    def tokenize(self, artist, genre, lyrics, **kwargs):
        artist, genre, lyrics = self.prepare_for_tokenization(artist, genre, lyrics)
        lyrics = self._tokenize(lyrics)
        return artist, genre, lyrics
    def prepare_for_tokenization(self, artists, genres, lyrics, is_split_into_words=False):
        for idx in range(len(self.version)):
            if self.version[idx] == "v3":
                artists[idx] = artists[idx].lower()
                genres[idx] = [genres[idx].lower()]
            else:
                artists[idx] = self._normalize(artists[idx]) + ".v2"
                genres[idx] = [
                    self._normalize(genre) + ".v2" for genre in genres[idx].split("_")
                ]  # split is for the full dictionary with combined genres

        if self.version[0] == "v2":
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+")
            vocab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"
            self.vocab = {vocab[index]: index + 1 for index in range(len(vocab))}
            self.vocab["<unk>"] = 0
            self.n_vocab = len(vocab) + 1
            self.lyrics_encoder = self.vocab
            self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
            self.lyrics_decoder[0] = ""
        else:
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+")

        lyrics = self._run_strip_accents(lyrics)
        lyrics = lyrics.replace("\\", "\n")
        lyrics = self.out_of_vocab.sub("", lyrics), [], []
        return artists, genres, lyrics
    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)
    def _normalize(self, text: str) -> str:
        """Normalizes the input text: keep only accepted characters, collapse '_' runs."""
        accepted = (
            [chr(i) for i in range(ord("a"), ord("z") + 1)]
            + [chr(i) for i in range(ord("A"), ord("Z") + 1)]
            + [chr(i) for i in range(ord("0"), ord("9") + 1)]
            + ["."]
        )
        accepted = frozenset(accepted)
        pattern = re.compile(r"_+")
        text = "".join([c if c in accepted else "_" for c in text.lower()])
        text = pattern.sub("_", text).strip("_")
        return text
def SCREAMING_SNAKE_CASE__ ( self:Tuple , _a:Dict ):
return " ".join(_a )
    def convert_to_tensors( self:str , inputs:Any , tensor_type:List[Any] = None , prepend_batch_axis:Any = False ):
        # Convert to TensorType
        if not isinstance(tensor_type , TensorType ):
            tensor_type = TensorType(tensor_type )
        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    '''Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.''' )
            import tensorflow as tf
            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError('''Unable to convert output to PyTorch tensors format, PyTorch is not installed.''' )
            import torch
            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError('''Unable to convert output to JAX tensors format, JAX is not installed.''' )
            import jax.numpy as jnp  # noqa: F811
            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            as_tensor = np.asarray
            is_tensor = _is_numpy
        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                inputs = [inputs]
            if not is_tensor(inputs ):
                inputs = as_tensor(inputs )
        except:  # noqa E722
            raise ValueError(
                '''Unable to create tensor, you should probably activate truncation and/or padding '''
                '''with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.''' )
        return inputs
    def __call__( self:Dict , artist:List[str] , genres:Optional[int] , lyrics:Optional[int]="" , return_tensors:str="pt" ):
        input_ids = [0, 0, 0]
        artist = [artist] * len(self.version )
        genres = [genres] * len(self.version )
        artists_tokens , genres_tokens , lyrics_tokens = self.tokenize(artist , genres , lyrics )
        artists_id , genres_ids , full_tokens = self._convert_token_to_id(artists_tokens , genres_tokens , lyrics_tokens )
        attention_masks = [-INFINITY] * len(full_tokens[-1] )
        input_ids = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=return_tensors )
            for i in range(len(self.version ) )
        ]
        return BatchEncoding({'''input_ids''': input_ids, '''attention_masks''': attention_masks} )
    def save_vocabulary( self:List[Any] , save_directory:List[str] , filename_prefix:Any = None ):
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        artists_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''artists_file'''] )
        with open(artists_file , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.artists_encoder , ensure_ascii=False ) )
        genres_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''genres_file'''] )
        with open(genres_file , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.genres_encoder , ensure_ascii=False ) )
        lyrics_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''lyrics_file'''] )
        with open(lyrics_file , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.lyrics_encoder , ensure_ascii=False ) )
        return (artists_file, genres_file, lyrics_file)
    def _convert_id_to_token( self:Dict , artists_index:str , genres_index:Any , lyric_index:int ):
        artist = self.artists_decoder.get(artists_index )
        genres = [self.genres_decoder.get(genre ) for genre in genres_index]
        lyrics = [self.lyrics_decoder.get(character ) for character in lyric_index]
        return artist, genres, lyrics
| 33 |
'''simple docstring'''
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 672 | 0 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
__magic_name__ = logging.getLogger(__name__)
def parse_args():
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
    parser.add_argument(
        "--dataset_name" , type=str , default="wikitext" , help="Name of the training dataset. Explore datasets at: hf.co/datasets." , )
    parser.add_argument(
        "--dataset_config" , type=str , default="wikitext-103-raw-v1" , help="Configuration name of the dataset." )
    parser.add_argument(
        "--tokenizer_name_or_path" , type=str , default="sayakpaul/unigram-tokenizer-wikitext" , help="Tokenizer identifier. Can be a local filepath or a Hub identifier." , )
    parser.add_argument(
        "--shard_size" , type=int , default=1_000 , help="Number of entries to go in a single shard." , )
    parser.add_argument("--split" , type=str , default="train" , choices=["train", "test", "validation"] )
    parser.add_argument(
        "--limit" , default=None , type=int , help="Limit the number of shards (used for debugging)." , )
    parser.add_argument(
        "--max_length" , type=int , default=512 , help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8." , )
    parser.add_argument(
        "--output_dir" , default="tf-tpu" , type=str , help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket." , )
    args = parser.parse_args()
    return args
def tokenize_function(tokenizer ):
    def fn(examples ):
        return tokenizer(examples["text"] )
    return fn
def get_serialized_examples(tokenized_data ):
    records = []
    for i in range(len(tokenized_data["input_ids"] ) ):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i] ) ),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i] ) ),
        }
        features = tf.train.Features(feature=features )
        example = tf.train.Example(features=features )
        serialized = example.SerializeToString()
        records.append(serialized )
    return records
def main(args ):
    dataset = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
    if args.limit is not None:
        max_samples = min(len(dataset ) , args.limit )
        dataset = dataset.select(range(max_samples ) )
        print(F"""Limiting the dataset to {args.limit} entries.""" )
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir ):
            os.makedirs(args.output_dir )
        split_dir = os.path.join(args.output_dir , args.split )
        if not os.path.exists(split_dir ):
            os.makedirs(split_dir )
    else:
        split_dir = os.path.join(args.output_dir , args.split )
    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer )
    dataset_tokenized = dataset.map(tokenize_fn , batched=True , num_proc=4 , remove_columns=["text"] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples ):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k] , [] ) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys() )[0]] )
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0 , total_length , args.max_length )]
            for k, t in concatenated_examples.items()
        }
        return result
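    # Hand-worked example: with max_length=4, input_ids [[1,2,3,4,5],[6,7,8]] are concatenated
    # to [1..8] and re-split into [[1,2,3,4],[5,6,7,8]]; a remainder shorter than 4 is dropped.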
    grouped_dataset = dataset_tokenized.map(group_texts , batched=True , batch_size=1_000 , num_proc=4 )
    shard_count = 0
    total_records = 0
    for shard in range(0 , len(grouped_dataset ) , args.shard_size ):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"] )
        filename = os.path.join(split_dir , F"""dataset-{shard_count}-{records_containing}.tfrecord""" )
        serialized_examples = get_serialized_examples(dataset_snapshot )
        with tf.io.TFRecordWriter(filename ) as out_file:
            for i in range(len(serialized_examples ) ):
                example = serialized_examples[i]
                out_file.write(example )
            print("Wrote file {} containing {} records".format(filename , records_containing ) )
        shard_count += 1
        total_records += records_containing
    with open(F"""split-{args.split}-records-count.txt""" , "w" ) as f:
        print(F"""Total {args.split} records: {total_records}""" , file=f )
if __name__ == "__main__":
__magic_name__ = parse_args()
main(args)
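# Illustrative invocation (the script/file name is assumed, not taken from this file):
#   python prepare_tfrecord_shards.py --dataset_name wikitext --dataset_config wikitext-103-raw-v1 \
#       --split train --shard_size 1000 --output_dir tf-tpu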
| 714 |
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
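# The two stub classes below exist only so the tests can inspect forward() signatures
# with contiguous vs. non-contiguous argument layouts.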
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None
class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None
class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
# (model_name, model_kwargs)
('bert-base-cased', {}),
('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def A_ ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCamelCase , "tf" , 12 , **lowerCamelCase )
@require_torch
@slow
def A_ ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCamelCase , "pt" , 12 , **lowerCamelCase )
@require_torch
@slow
def A_ ( self ):
from transformers import BertModel
snake_case__ = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
with NamedTemporaryFile(mode="w+t" ) as vocab_file:
vocab_file.write("\n".join(lowerCamelCase ) )
vocab_file.flush()
snake_case__ = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
snake_case__ = BertModel(BertConfig(vocab_size=len(lowerCamelCase ) ) )
model.save_pretrained(lowerCamelCase )
self._test_export(lowerCamelCase , "pt" , 12 , lowerCamelCase )
@require_tf
@slow
def A_ ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
snake_case__ = self._test_export(lowerCamelCase , "tf" , 12 , **lowerCamelCase )
snake_case__ = quantize(Path(lowerCamelCase ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCamelCase ).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model" )
@require_torch
@slow
def A_ ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
snake_case__ = self._test_export(lowerCamelCase , "pt" , 12 , **lowerCamelCase )
snake_case__ = quantize(lowerCamelCase )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCamelCase ).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model" )
def A_ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , **lowerCamelCase ):
try:
# Compute path
with TemporaryDirectory() as tempdir:
snake_case__ = Path(lowerCamelCase ).joinpath("model.onnx" )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase )
return path
except Exception as e:
self.fail(lowerCamelCase )
@require_torch
@require_tokenizers
@slow
def A_ ( self ):
from transformers import BertModel
snake_case__ = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
snake_case__ = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
self._test_infer_dynamic_axis(lowerCamelCase , lowerCamelCase , "pt" )
@require_tf
@require_tokenizers
@slow
def A_ ( self ):
from transformers import TFBertModel
snake_case__ = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
snake_case__ = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
self._test_infer_dynamic_axis(lowerCamelCase , lowerCamelCase , "tf" )
def A_ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
snake_case__ = FeatureExtractionPipeline(lowerCamelCase , lowerCamelCase )
snake_case__ = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
snake_case__ , snake_case__ , snake_case__ , snake_case__ = infer_shapes(lowerCamelCase , lowerCamelCase )
# Assert all variables are present
self.assertEqual(len(lowerCamelCase ) , len(lowerCamelCase ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , lowerCamelCase )
self.assertSequenceEqual(variable_names[3:] , lowerCamelCase )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: "batch", 1: "sequence"} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["output_0"] , {0: "batch", 1: "sequence"} )
self.assertDictEqual(shapes["output_1"] , {0: "batch"} )
def A_ ( self ):
snake_case__ = ["input_ids", "attention_mask", "token_type_ids"]
snake_case__ = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
snake_case__ , snake_case__ = ensure_valid_input(FuncContiguousArgs() , lowerCamelCase , lowerCamelCase )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(lowerCamelCase ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(lowerCamelCase ) , set(lowerCamelCase ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(lowerCamelCase , (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
snake_case__ , snake_case__ = ensure_valid_input(FuncNonContiguousArgs() , lowerCamelCase , lowerCamelCase )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(lowerCamelCase ) , 1 )
self.assertEqual(len(lowerCamelCase ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens["input_ids"] )
self.assertEqual(ordered_input_names[0] , "input_ids" )
def A_ ( self ):
snake_case__ = generate_identified_filename(Path("/home/something/my_fake_model.onnx" ) , "-test" )
self.assertEqual("/home/something/my_fake_model-test.onnx" , generated.as_posix() )
| 530 | 0 |
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
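    # Directed graph stored as an adjacency dict: self.graph[u] holds [weight, v] pairs for edges u -> v.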
def __init__( self : Tuple ) -> str:
"""simple docstring"""
_a = {}
def __lowerCAmelCase ( self : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any]=1 ) -> Dict:
"""simple docstring"""
if self.graph.get(lowerCAmelCase_ ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
_a = [[w, v]]
if not self.graph.get(lowerCAmelCase_ ):
_a = []
def __lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
return list(self.graph )
def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] ) -> str:
"""simple docstring"""
if self.graph.get(lowerCAmelCase_ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCAmelCase_ )
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : Dict=-2 , lowerCAmelCase_ : List[str]=-1 ) -> Union[str, Any]:
"""simple docstring"""
if s == d:
return []
_a = []
_a = []
if s == -2:
_a = list(self.graph )[0]
stack.append(lowerCAmelCase_ )
visited.append(lowerCAmelCase_ )
_a = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCAmelCase_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCAmelCase_ ) != 0:
_a = stack[len(lowerCAmelCase_ ) - 1]
else:
_a = ss
            # check if we have reached the starting point
if len(lowerCAmelCase_ ) == 0:
return visited
def __lowerCAmelCase ( self : str , lowerCAmelCase_ : Optional[int]=-1 ) -> str:
"""simple docstring"""
if c == -1:
_a = floor(random() * 1_00_00 ) + 10
for i in range(lowerCAmelCase_ ):
            # every vertex has max 102 edges
for _ in range(floor(random() * 1_02 ) + 1 ):
_a = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCAmelCase_ , lowerCAmelCase_ , 1 )
def __lowerCAmelCase ( self : List[str] , lowerCAmelCase_ : Dict=-2 ) -> Optional[int]:
"""simple docstring"""
_a = deque()
_a = []
if s == -2:
_a = list(self.graph )[0]
d.append(lowerCAmelCase_ )
visited.append(lowerCAmelCase_ )
while d:
_a = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : Dict ) -> List[Any]:
"""simple docstring"""
_a = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : List[Any] ) -> Dict:
"""simple docstring"""
return len(self.graph[u] )
def __lowerCAmelCase ( self : str , lowerCAmelCase_ : List[str]=-2 ) -> int:
"""simple docstring"""
_a = []
_a = []
if s == -2:
_a = list(self.graph )[0]
stack.append(lowerCAmelCase_ )
visited.append(lowerCAmelCase_ )
_a = s
_a = []
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(lowerCAmelCase_ ) != 0:
_a = stack[len(lowerCAmelCase_ ) - 1]
else:
_a = ss
            # check if we have reached the starting point
if len(lowerCAmelCase_ ) == 0:
return sorted_nodes
def __lowerCAmelCase ( self : List[Any] ) -> int:
"""simple docstring"""
_a = []
_a = []
_a = list(self.graph )[0]
stack.append(lowerCAmelCase_ )
visited.append(lowerCAmelCase_ )
_a = -2
_a = []
_a = s
_a = False
_a = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_a = len(lowerCAmelCase_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_a = True
if len(lowerCAmelCase_ ) != 0:
_a = stack[len(lowerCAmelCase_ ) - 1]
else:
_a = False
indirect_parents.append(lowerCAmelCase_ )
_a = s
_a = ss
            # check if we have reached the starting point
if len(lowerCAmelCase_ ) == 0:
return list(lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
_a = []
_a = []
_a = list(self.graph )[0]
stack.append(lowerCAmelCase_ )
visited.append(lowerCAmelCase_ )
_a = -2
_a = []
_a = s
_a = False
_a = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_a = len(lowerCAmelCase_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_a = True
if len(lowerCAmelCase_ ) != 0:
_a = stack[len(lowerCAmelCase_ ) - 1]
else:
_a = False
indirect_parents.append(lowerCAmelCase_ )
_a = s
_a = ss
            # check if we have reached the starting point
if len(lowerCAmelCase_ ) == 0:
return False
def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Any=-2 , lowerCAmelCase_ : Optional[Any]=-1 ) -> Optional[Any]:
"""simple docstring"""
_a = time()
self.dfs(lowerCAmelCase_ , lowerCAmelCase_ )
_a = time()
return end - begin
def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : Tuple=-2 ) -> List[str]:
"""simple docstring"""
_a = time()
self.bfs(lowerCAmelCase_ )
_a = time()
return end - begin
class Graph:
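    # Undirected variant: add_pair/remove_pair mirror every edge in both adjacency lists.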
def __init__( self : List[Any] ) -> Any:
"""simple docstring"""
_a = {}
def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : str=1 ) -> Dict:
"""simple docstring"""
if self.graph.get(lowerCAmelCase_ ):
            # if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
_a = [[w, v]]
# add the other way
if self.graph.get(lowerCAmelCase_ ):
            # if there already is an edge
            if self.graph[v].count([w, u] ) == 0:
                self.graph[v].append([w, u] )
        else:
            # if v does not exist
_a = [[w, u]]
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple ) -> int:
"""simple docstring"""
if self.graph.get(lowerCAmelCase_ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCAmelCase_ )
# the other way round
if self.graph.get(lowerCAmelCase_ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(lowerCAmelCase_ )
def __lowerCAmelCase ( self : Any , lowerCAmelCase_ : Any=-2 , lowerCAmelCase_ : Optional[int]=-1 ) -> Optional[int]:
"""simple docstring"""
if s == d:
return []
_a = []
_a = []
if s == -2:
_a = list(self.graph )[0]
stack.append(lowerCAmelCase_ )
visited.append(lowerCAmelCase_ )
_a = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCAmelCase_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCAmelCase_ ) != 0:
_a = stack[len(lowerCAmelCase_ ) - 1]
else:
_a = ss
            # check if we have reached the starting point
if len(lowerCAmelCase_ ) == 0:
return visited
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : Union[str, Any]=-1 ) -> Any:
"""simple docstring"""
if c == -1:
_a = floor(random() * 1_00_00 ) + 10
for i in range(lowerCAmelCase_ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_02 ) + 1 ):
_a = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCAmelCase_ , lowerCAmelCase_ , 1 )
def __lowerCAmelCase ( self : str , lowerCAmelCase_ : List[Any]=-2 ) -> Optional[Any]:
"""simple docstring"""
_a = deque()
_a = []
if s == -2:
_a = list(self.graph )[0]
d.append(lowerCAmelCase_ )
visited.append(lowerCAmelCase_ )
while d:
_a = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : Optional[int] ) -> Any:
"""simple docstring"""
return len(self.graph[u] )
def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
_a = []
_a = []
_a = list(self.graph )[0]
stack.append(lowerCAmelCase_ )
visited.append(lowerCAmelCase_ )
_a = -2
_a = []
_a = s
_a = False
_a = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_a = len(lowerCAmelCase_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_a = True
if len(lowerCAmelCase_ ) != 0:
_a = stack[len(lowerCAmelCase_ ) - 1]
else:
_a = False
indirect_parents.append(lowerCAmelCase_ )
_a = s
_a = ss
            # check if we have reached the starting point
if len(lowerCAmelCase_ ) == 0:
return list(lowerCAmelCase_ )
def __lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
_a = []
_a = []
_a = list(self.graph )[0]
stack.append(lowerCAmelCase_ )
visited.append(lowerCAmelCase_ )
_a = -2
_a = []
_a = s
_a = False
_a = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_a = len(lowerCAmelCase_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_a = True
if len(lowerCAmelCase_ ) != 0:
_a = stack[len(lowerCAmelCase_ ) - 1]
else:
_a = False
indirect_parents.append(lowerCAmelCase_ )
_a = s
_a = ss
# check if se have reached the starting point
if len(lowerCAmelCase_ ) == 0:
return False
def __lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return list(self.graph )
def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : int=-2 , lowerCAmelCase_ : int=-1 ) -> List[Any]:
"""simple docstring"""
_a = time()
self.dfs(lowerCAmelCase_ , lowerCAmelCase_ )
_a = time()
return end - begin
def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : Dict=-2 ) -> int:
"""simple docstring"""
_a = time()
self.bfs(lowerCAmelCase_ )
_a = time()
return end - begin
| 22 |
def match_pattern(input_string: str , pattern: str ) -> bool:
    len_string = len(input_string ) + 1
    len_pattern = len(pattern ) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern )] for j in range(len_string )]
    # since a string of zero length matches a pattern of zero length
    dp[0][0] = 1
    # since a pattern of zero length will never match a string of non-zero length
    for i in range(1, len_string ):
        dp[i][0] = 0
    # since a string of zero length will match a pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern ):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == '''*''' else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string ):
        for j in range(1, len_pattern ):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1] )
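# Hand-checked example: match_pattern("aab", "c*a*b") is True, since "c*" matches zero
# characters, "a*" matches "aa", and "b" matches the final "b".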
if __name__ == "__main__":
import doctest
doctest.testmod()
    # inputting the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")
    input_string = "aab"
    pattern = "c*a*b"
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F"{input_string} matches the given pattern {pattern}")
else:
print(F"{input_string} does not match with the given pattern {pattern}")
| 416 | 0 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
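# Each try/except block below swaps in dummy placeholder objects when an optional
# backend (torch, flax, scipy, torchsde) is not installed.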
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 203 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
_snake_case : Any = "bert-base-cased"
_snake_case : List[Any] = "google/pegasus-xsum"
_snake_case : Dict = [" Sam ate lunch today.", "Sams lunch ingredients."]
_snake_case : Dict = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
_snake_case : List[str] = "patrickvonplaten/t5-tiny-random"
_snake_case : Optional[Any] = "sshleifer/bart-tiny-random"
_snake_case : Optional[Any] = "sshleifer/tiny-mbart"
_snake_case : Tuple = "sshleifer/tiny-marian-en-de"
def _dump_articles(path , articles ):
    content = "\n".join(articles )
    Path(path ).open("w" ).writelines(content )
def make_test_data_dir(tmp_dir ):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir , F'{split}.source' ) , ARTICLES )
        _dump_articles(os.path.join(tmp_dir , F'{split}.target' ) , SUMMARIES )
    return tmp_dir
class a (_lowerCAmelCase ):
"""simple docstring"""
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def __snake_case ( self : List[Any] , lowerCamelCase : int ) -> Union[str, Any]:
__snake_case : Optional[Any] = AutoTokenizer.from_pretrained(lowerCamelCase )
__snake_case : List[Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
__snake_case : Optional[int] = max(len(tokenizer.encode(lowerCamelCase ) ) for a in ARTICLES )
__snake_case : Optional[int] = max(len(tokenizer.encode(lowerCamelCase ) ) for a in SUMMARIES )
__snake_case : str = 4
__snake_case : Optional[int] = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
__snake_case , __snake_case : Optional[int] = "ro_RO", "de_DE" # ignored for all but mbart, but never causes error.
__snake_case : List[str] = SeqaSeqDataset(
lowerCamelCase , data_dir=lowerCamelCase , type_path="train" , max_source_length=lowerCamelCase , max_target_length=lowerCamelCase , src_lang=lowerCamelCase , tgt_lang=lowerCamelCase , )
__snake_case : str = DataLoader(lowerCamelCase , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(lowerCamelCase , lowerCamelCase )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
__snake_case : int = shift_tokens_right(batch["labels"] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def __snake_case ( self : Union[str, Any] , lowerCamelCase : List[Any] ) -> Optional[Any]:
__snake_case : Any = AutoTokenizer.from_pretrained(lowerCamelCase )
__snake_case : List[str] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
__snake_case : Union[str, Any] = max(len(tokenizer.encode(lowerCamelCase ) ) for a in ARTICLES )
__snake_case : Tuple = max(len(tokenizer.encode(lowerCamelCase ) ) for a in SUMMARIES )
__snake_case : List[str] = 4
__snake_case : List[str] = LegacySeqaSeqDataset(
lowerCamelCase , data_dir=lowerCamelCase , type_path="train" , max_source_length=20 , max_target_length=lowerCamelCase , )
__snake_case : int = DataLoader(lowerCamelCase , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def __snake_case ( self : List[str] ) -> int:
__snake_case : str = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25" )
__snake_case : int = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
__snake_case : str = tmp_dir.joinpath("train.source" ).open().readlines()
__snake_case : Any = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(lowerCamelCase , lowerCamelCase , 128 , lowerCamelCase )
__snake_case : str = {x.name for x in tmp_dir.iterdir()}
__snake_case : Optional[int] = {x.name for x in save_dir.iterdir()}
__snake_case : Optional[int] = save_dir.joinpath("train.source" ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(lowerCamelCase ) < len(lowerCamelCase )
assert len(lowerCamelCase ) == 1
assert len(packed_examples[0] ) == sum(len(lowerCamelCase ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="This test requires fairseq" )
def __snake_case ( self : Tuple ) -> str:
if not FAIRSEQ_AVAILABLE:
return
__snake_case , __snake_case , __snake_case : Optional[Any] = self._get_dataset(max_len=64 )
__snake_case : Optional[int] = 64
__snake_case : List[Any] = ds.make_dynamic_sampler(lowerCamelCase , required_batch_size_multiple=lowerCamelCase )
__snake_case : str = [len(lowerCamelCase ) for x in batch_sampler]
assert len(set(lowerCamelCase ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(lowerCamelCase ) == len(lowerCamelCase ) # no dropped or added examples
__snake_case : Optional[Any] = DataLoader(lowerCamelCase , batch_sampler=lowerCamelCase , collate_fn=ds.collate_fn , num_workers=2 )
__snake_case : Union[str, Any] = []
__snake_case : str = []
for batch in data_loader:
__snake_case : str = batch["input_ids"].shape
__snake_case : Dict = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
__snake_case : str = np.product(batch["input_ids"].shape )
num_src_per_batch.append(lowerCamelCase )
if num_src_tokens > (max_tokens * 1.1):
failures.append(lowerCamelCase )
assert num_src_per_batch[0] == max(lowerCamelCase )
if failures:
raise AssertionError(F'too many tokens in {len(lowerCamelCase )} batches' )
def __snake_case ( self : int ) -> Any:
__snake_case , __snake_case , __snake_case : Union[str, Any] = self._get_dataset(max_len=512 )
__snake_case : Union[str, Any] = 2
__snake_case : List[str] = ds.make_sortish_sampler(lowerCamelCase , shuffle=lowerCamelCase )
__snake_case : Dict = DataLoader(lowerCamelCase , batch_size=lowerCamelCase , collate_fn=ds.collate_fn , num_workers=2 )
__snake_case : List[Any] = DataLoader(lowerCamelCase , batch_size=lowerCamelCase , collate_fn=ds.collate_fn , num_workers=2 , sampler=lowerCamelCase )
__snake_case : List[Any] = tokenizer.pad_token_id
def count_pad_tokens(lowerCamelCase : List[Any] , lowerCamelCase : Optional[int]="input_ids" ):
return [batch[k].eq(lowerCamelCase ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(lowerCamelCase , k="labels" ) ) < sum(count_pad_tokens(lowerCamelCase , k="labels" ) )
assert sum(count_pad_tokens(lowerCamelCase ) ) < sum(count_pad_tokens(lowerCamelCase ) )
assert len(lowerCamelCase ) == len(lowerCamelCase )
def __snake_case ( self : Any , lowerCamelCase : List[Any]=1000 , lowerCamelCase : Union[str, Any]=128 ) -> Any:
if os.getenv("USE_REAL_DATA" , lowerCamelCase ):
__snake_case : int = "examples/seq2seq/wmt_en_ro"
__snake_case : Union[str, Any] = max_len * 2 * 64
if not Path(lowerCamelCase ).joinpath("train.len" ).exists():
save_len_file(lowerCamelCase , lowerCamelCase )
else:
__snake_case : List[str] = "examples/seq2seq/test_data/wmt_en_ro"
__snake_case : List[Any] = max_len * 4
save_len_file(lowerCamelCase , lowerCamelCase )
__snake_case : Dict = AutoTokenizer.from_pretrained(lowerCamelCase )
__snake_case : int = SeqaSeqDataset(
lowerCamelCase , data_dir=lowerCamelCase , type_path="train" , max_source_length=lowerCamelCase , max_target_length=lowerCamelCase , n_obs=lowerCamelCase , )
return ds, max_tokens, tokenizer
def __snake_case ( self : Tuple ) -> Dict:
__snake_case , __snake_case , __snake_case : Any = self._get_dataset()
__snake_case : Optional[Any] = set(DistributedSortishSampler(lowerCamelCase , 256 , num_replicas=2 , rank=0 , add_extra_examples=lowerCamelCase ) )
__snake_case : int = set(DistributedSortishSampler(lowerCamelCase , 256 , num_replicas=2 , rank=1 , add_extra_examples=lowerCamelCase ) )
assert idsa.intersection(lowerCamelCase ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def __snake_case ( self : List[str] , lowerCamelCase : Union[str, Any] ) -> str:
__snake_case : Tuple = AutoTokenizer.from_pretrained(lowerCamelCase , use_fast=lowerCamelCase )
if tok_name == MBART_TINY:
__snake_case : Dict = SeqaSeqDataset(
lowerCamelCase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , src_lang="EN" , tgt_lang="FR" , )
__snake_case : Any = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
__snake_case : Union[str, Any] = SeqaSeqDataset(
lowerCamelCase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , )
__snake_case : Optional[int] = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(lowerCamelCase ) == 1 if tok_name == BART_TINY else len(lowerCamelCase ) == 0
| 203 | 1 |
"""simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
class SCREAMING_SNAKE_CASE__ :
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[Any]=0.2 , SCREAMING_SNAKE_CASE_ : Dict=0.2 ):
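        # Expects three BP layer widths (num_bp1..3), a conv spec [kernel_size, n_kernels, conv_step],
        # the pooling window size, and the weight/threshold learning rates.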
lowerCamelCase__ = bp_numa
lowerCamelCase__ = bp_numa
lowerCamelCase__ = bp_numa
lowerCamelCase__ = conva_get[:2]
lowerCamelCase__ = conva_get[2]
lowerCamelCase__ = size_pa
lowerCamelCase__ = rate_w
lowerCamelCase__ = rate_t
lowerCamelCase__ = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
lowerCamelCase__ = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
lowerCamelCase__ = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
lowerCamelCase__ = -2 * np.random.rand(self.conva[1] ) + 1
lowerCamelCase__ = -2 * np.random.rand(self.num_bpa ) + 1
lowerCamelCase__ = -2 * np.random.rand(self.num_bpa ) + 1
def __UpperCAmelCase ( self : int , SCREAMING_SNAKE_CASE_ : Any ):
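        # Serializes every weight matrix and hyperparameter to disk with pickle;
        # the classmethod below restores them into a fresh instance.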
lowerCamelCase__ = {
"num_bp1": self.num_bpa,
"num_bp2": self.num_bpa,
"num_bp3": self.num_bpa,
"conv1": self.conva,
"step_conv1": self.step_conva,
"size_pooling1": self.size_poolinga,
"rate_weight": self.rate_weight,
"rate_thre": self.rate_thre,
"w_conv1": self.w_conva,
"wkj": self.wkj,
"vji": self.vji,
"thre_conv1": self.thre_conva,
"thre_bp2": self.thre_bpa,
"thre_bp3": self.thre_bpa,
}
with open(SCREAMING_SNAKE_CASE_ , """wb""" ) as f:
pickle.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
print(f"""Model saved: {save_path}""" )
@classmethod
def __UpperCAmelCase ( cls : Any , SCREAMING_SNAKE_CASE_ : int ):
with open(SCREAMING_SNAKE_CASE_ , """rb""" ) as f:
lowerCamelCase__ = pickle.load(SCREAMING_SNAKE_CASE_ ) # noqa: S301
lowerCamelCase__ = model_dic.get("""conv1""" )
conv_get.append(model_dic.get("""step_conv1""" ) )
lowerCamelCase__ = model_dic.get("""size_pooling1""" )
lowerCamelCase__ = model_dic.get("""num_bp1""" )
lowerCamelCase__ = model_dic.get("""num_bp2""" )
lowerCamelCase__ = model_dic.get("""num_bp3""" )
lowerCamelCase__ = model_dic.get("""rate_weight""" )
lowerCamelCase__ = model_dic.get("""rate_thre""" )
# create model instance
lowerCamelCase__ = CNN(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# modify model parameter
lowerCamelCase__ = model_dic.get("""w_conv1""" )
lowerCamelCase__ = model_dic.get("""wkj""" )
lowerCamelCase__ = model_dic.get("""vji""" )
lowerCamelCase__ = model_dic.get("""thre_conv1""" )
lowerCamelCase__ = model_dic.get("""thre_bp2""" )
lowerCamelCase__ = model_dic.get("""thre_bp3""" )
return conv_ins
def __UpperCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ):
return 1 / (1 + np.exp(-1 * x ))
def __UpperCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE_ : str ):
return round(SCREAMING_SNAKE_CASE_ , 3 )
def __UpperCAmelCase ( self : int , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ):
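        # Slides each kernel over the input with stride conv_step, applies the sigmoid,
        # and also returns the flattened input patches reused by the backward pass.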
lowerCamelCase__ = convs[0]
lowerCamelCase__ = convs[1]
lowerCamelCase__ = np.shape(SCREAMING_SNAKE_CASE_ )[0]
# get the data slice of original image data, data_focus
lowerCamelCase__ = []
for i_focus in range(0 , size_data - size_conv + 1 , SCREAMING_SNAKE_CASE_ ):
for j_focus in range(0 , size_data - size_conv + 1 , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase__ = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(SCREAMING_SNAKE_CASE_ )
# calculate the feature map of every single kernel, and saved as list of matrix
lowerCamelCase__ = []
lowerCamelCase__ = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(SCREAMING_SNAKE_CASE_ ):
lowerCamelCase__ = []
for i_focus in range(len(SCREAMING_SNAKE_CASE_ ) ):
lowerCamelCase__ = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase__ = np.asmatrix(SCREAMING_SNAKE_CASE_ ).reshape(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
data_featuremap.append(SCREAMING_SNAKE_CASE_ )
        # expanding the data slice to one dimension
lowerCamelCase__ = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase__ = np.asarray(SCREAMING_SNAKE_CASE_ )
return focus_list, data_featuremap
def __UpperCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[Any]="average_pool" ):
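        # Non-overlapping average or max pooling over size_pooling x size_pooling windows.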
lowerCamelCase__ = len(featuremaps[0] )
lowerCamelCase__ = int(size_map / size_pooling )
lowerCamelCase__ = []
for i_map in range(len(SCREAMING_SNAKE_CASE_ ) ):
lowerCamelCase__ = featuremaps[i_map]
lowerCamelCase__ = []
for i_focus in range(0 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
for j_focus in range(0 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase__ = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(SCREAMING_SNAKE_CASE_ ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase__ = np.asmatrix(SCREAMING_SNAKE_CASE_ ).reshape(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
featuremap_pooled.append(SCREAMING_SNAKE_CASE_ )
return featuremap_pooled
def __UpperCAmelCase ( self : int , SCREAMING_SNAKE_CASE_ : Optional[int] ):
lowerCamelCase__ = []
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
lowerCamelCase__ = np.shape(data[i] )
lowerCamelCase__ = data[i].reshape(1 , shapes[0] * shapes[1] )
lowerCamelCase__ = data_listed.getA().tolist()[0]
data_expanded.extend(SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ = np.asarray(SCREAMING_SNAKE_CASE_ )
return data_expanded
def __UpperCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE_ : Dict ):
lowerCamelCase__ = np.asarray(SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ = np.shape(SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def __UpperCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Tuple ):
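        # Spreads the pooled gradient back over each size_pooling window and scales it
        # by the sigmoid derivative out * (1 - out).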
lowerCamelCase__ = []
lowerCamelCase__ = 0
for i_map in range(SCREAMING_SNAKE_CASE_ ):
lowerCamelCase__ = np.ones((size_map, size_map) )
for i in range(0 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
for j in range(0 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase__ = pd_pool[
i_pool
]
lowerCamelCase__ = i_pool + 1
lowerCamelCase__ = np.multiply(
SCREAMING_SNAKE_CASE_ , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(SCREAMING_SNAKE_CASE_ )
return pd_all
def __UpperCAmelCase ( self : int , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any]=bool ):
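        # Full training loop: conv -> pool -> two fully connected layers, then backprop with
        # per-pattern weight/threshold updates until the MSE target or n_repeat is reached.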
print("""----------------------Start Training-------------------------""" )
print((""" - - Shape: Train_Data """, np.shape(SCREAMING_SNAKE_CASE_ )) )
print((""" - - Shape: Teach_Data """, np.shape(SCREAMING_SNAKE_CASE_ )) )
lowerCamelCase__ = 0
lowerCamelCase__ = []
lowerCamelCase__ = 1_0000
while rp < n_repeat and mse >= error_accuracy:
lowerCamelCase__ = 0
print(f"""-------------Learning Time {rp}--------------""" )
for p in range(len(SCREAMING_SNAKE_CASE_ ) ):
# print('------------Learning Image: %d--------------'%p)
lowerCamelCase__ = np.asmatrix(datas_train[p] )
lowerCamelCase__ = np.asarray(datas_teach[p] )
lowerCamelCase__ = self.convolute(
SCREAMING_SNAKE_CASE_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowerCamelCase__ = self.pooling(SCREAMING_SNAKE_CASE_ , self.size_poolinga )
lowerCamelCase__ = np.shape(SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ = self._expand(SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ = data_bp_input
lowerCamelCase__ = np.dot(SCREAMING_SNAKE_CASE_ , self.vji.T ) - self.thre_bpa
lowerCamelCase__ = self.sig(SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ = np.dot(SCREAMING_SNAKE_CASE_ , self.wkj.T ) - self.thre_bpa
lowerCamelCase__ = self.sig(SCREAMING_SNAKE_CASE_ )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
lowerCamelCase__ = np.multiply(
(data_teach - bp_outa) , np.multiply(SCREAMING_SNAKE_CASE_ , (1 - bp_outa) ) )
lowerCamelCase__ = np.multiply(
np.dot(SCREAMING_SNAKE_CASE_ , self.wkj ) , np.multiply(SCREAMING_SNAKE_CASE_ , (1 - bp_outa) ) )
lowerCamelCase__ = np.dot(SCREAMING_SNAKE_CASE_ , self.vji )
lowerCamelCase__ = pd_i_all / (self.size_poolinga * self.size_poolinga)
lowerCamelCase__ = pd_conva_pooled.T.getA().tolist()
lowerCamelCase__ = self._calculate_gradient_from_pool(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
lowerCamelCase__ = self._expand_mat(pd_conva_all[k_conv] )
lowerCamelCase__ = self.rate_weight * np.dot(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
lowerCamelCase__ = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
lowerCamelCase__ = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
lowerCamelCase__ = self.vji + pd_j_all.T * bp_outa * self.rate_weight
lowerCamelCase__ = self.thre_bpa - pd_k_all * self.rate_thre
lowerCamelCase__ = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
lowerCamelCase__ = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
lowerCamelCase__ = rp + 1
lowerCamelCase__ = error_count / patterns
all_mse.append(SCREAMING_SNAKE_CASE_ )
def draw_error():
lowerCamelCase__ = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(SCREAMING_SNAKE_CASE_ , """+-""" )
plt.plot(SCREAMING_SNAKE_CASE_ , """r--""" )
plt.xlabel("""Learning Times""" )
plt.ylabel("""All_mse""" )
plt.grid(SCREAMING_SNAKE_CASE_ , alpha=0.5 )
plt.show()
print("""------------------Training Complished---------------------""" )
print((""" - - Training epoch: """, rp, f""" - - Mse: {mse:.6f}""") )
if draw_e:
draw_error()
return mse
    def predict(self, datas_test):
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data  ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_conveda, data_pooleda = self.convolute(
                data_test,
                self.conva,
                self.w_conva,
                self.thre_conva,
                conv_step=self.step_conva,
            )
            data_pooleda = self.pooling(data_conveda, self.size_poolinga)
            data_bp_input = self._expand(data_pooleda)
            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bpa
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bpa
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)
    def convolution(self, data):
        data_test = np.asmatrix(data)
        data_conveda, data_pooleda = self.convolute(
            data_test,
            self.conva,
            self.w_conva,
            self.thre_conva,
            conv_step=self.step_conva,
        )
        data_pooleda = self.pooling(data_conveda, self.size_poolinga)
        return data_conveda, data_pooleda
if __name__ == "__main__":
pass
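

# The training loop above is per-sample backpropagation with sigmoid activations.
# A minimal, self-contained sketch of the output-layer delta rule it applies
# (the helper below is illustrative and not part of the original class):
def _output_layer_delta(target, output):
    # dE/dnet for a sigmoid unit under squared error: (t - y) * y * (1 - y);
    # this matches the `pd_k_all` term computed inside `train`.
    return (target - output) * output * (1.0 - output)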
| 129 |
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """
    Returns every possible combination of substrings from `word_bank`
    that concatenates to `target`.
    """
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to table[i + len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
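

# The bottom-up table above performs a slice comparison for every (position, word)
# pair and stores every decomposition. A top-down memoized variant (a sketch for
# contrast, not part of the original module) reaches the same result recursively:
def all_construct_memo(target: str, word_bank: list[str], memo=None) -> list[list[str]]:
    memo = {} if memo is None else memo
    if target in memo:
        return memo[target]
    if target == "":
        return [[]]
    ways = []
    for word in word_bank:
        if target.startswith(word):
            # every construction of the suffix extends to one of the full target
            for way in all_construct_memo(target[len(word) :], word_bank, memo):
                ways.append([word, *way])
    memo[target] = ways
    return ways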
| 228 | 0 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
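

# For reference, the expected-size computation above is plain shortest-edge
# resizing: scale the image so its shorter side equals size["shortest_edge"]
# while preserving aspect ratio. A standalone sketch of the same rule
# (illustrative helper, ignoring the "longest_edge" cap):
def _shortest_edge_resize(height, width, shortest_edge):
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge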
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass
def lowerCAmelCase__ ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = image_processing(UpperCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ = image_processing(UpperCAmelCase__ , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ = image_processing(UpperCAmelCase__ , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowerCAmelCase__ ( self ):
# prepare image and target
SCREAMING_SNAKE_CASE__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
SCREAMING_SNAKE_CASE__ = json.loads(f.read() )
SCREAMING_SNAKE_CASE__ = {"image_id": 3_9769, "annotations": target}
# encode them
SCREAMING_SNAKE_CASE__ = DetaImageProcessor()
SCREAMING_SNAKE_CASE__ = image_processing(images=UpperCAmelCase__ , annotations=UpperCAmelCase__ , return_tensors="pt" )
# verify pixel values
SCREAMING_SNAKE_CASE__ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , UpperCAmelCase__ , atol=1e-4 ) )
# verify area
SCREAMING_SNAKE_CASE__ = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , UpperCAmelCase__ ) )
# verify boxes
SCREAMING_SNAKE_CASE__ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , UpperCAmelCase__ , atol=1e-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE__ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , UpperCAmelCase__ ) )
# verify is_crowd
SCREAMING_SNAKE_CASE__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , UpperCAmelCase__ ) )
# verify class_labels
SCREAMING_SNAKE_CASE__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , UpperCAmelCase__ ) )
# verify orig_size
SCREAMING_SNAKE_CASE__ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , UpperCAmelCase__ ) )
# verify size
SCREAMING_SNAKE_CASE__ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , UpperCAmelCase__ ) )
@slow
def lowerCAmelCase__ ( self ):
# prepare image, target and masks_path
SCREAMING_SNAKE_CASE__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
SCREAMING_SNAKE_CASE__ = json.loads(f.read() )
SCREAMING_SNAKE_CASE__ = {"file_name": "000000039769.png", "image_id": 3_9769, "segments_info": target}
SCREAMING_SNAKE_CASE__ = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
SCREAMING_SNAKE_CASE__ = DetaImageProcessor(format="coco_panoptic" )
SCREAMING_SNAKE_CASE__ = image_processing(images=UpperCAmelCase__ , annotations=UpperCAmelCase__ , masks_path=UpperCAmelCase__ , return_tensors="pt" )
# verify pixel values
SCREAMING_SNAKE_CASE__ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , UpperCAmelCase__ , atol=1e-4 ) )
# verify area
SCREAMING_SNAKE_CASE__ = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , UpperCAmelCase__ ) )
# verify boxes
SCREAMING_SNAKE_CASE__ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , UpperCAmelCase__ , atol=1e-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE__ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , UpperCAmelCase__ ) )
# verify is_crowd
SCREAMING_SNAKE_CASE__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , UpperCAmelCase__ ) )
# verify class_labels
SCREAMING_SNAKE_CASE__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , UpperCAmelCase__ ) )
# verify masks
SCREAMING_SNAKE_CASE__ = 82_2873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , UpperCAmelCase__ )
# verify orig_size
SCREAMING_SNAKE_CASE__ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , UpperCAmelCase__ ) )
# verify size
SCREAMING_SNAKE_CASE__ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , UpperCAmelCase__ ) )
| 112 |
"""simple docstring"""
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
| 112 | 1 |
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class DebertaVaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaVaTokenizer
    rust_tokenizer_class = DebertaVaTokenizerFast
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCamelCase__ : Tuple ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase = "this is a test"
UpperCAmelCase = "this is a test"
return input_text, output_text
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
UpperCAmelCase = "<pad>"
UpperCAmelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> int:
'''simple docstring'''
UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<pad>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "[PAD]" )
self.assertEqual(len(UpperCamelCase__ ) , 3_00_01 )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> int:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 3_00_00 )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Any:
'''simple docstring'''
UpperCAmelCase = " \tHeLLo!how \n Are yoU? "
UpperCAmelCase = ["▁hello", "!", "how", "▁are", "▁you", "?"]
# fmt: on
UpperCAmelCase = DebertaVaTokenizer(UpperCamelCase__ , do_lower_case=UpperCamelCase__ )
UpperCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase = DebertaVaTokenizerFast(UpperCamelCase__ , do_lower_case=UpperCamelCase__ )
UpperCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Dict:
'''simple docstring'''
pass
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> int:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
UpperCAmelCase = "I was born in 92000, and this is falsé."
UpperCAmelCase = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
UpperCAmelCase = DebertaVaTokenizer(UpperCamelCase__ , split_by_punct=UpperCamelCase__ )
UpperCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase = DebertaVaTokenizerFast(UpperCamelCase__ , split_by_punct=UpperCamelCase__ )
UpperCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase = "I was born in 92000, and this is falsé."
UpperCAmelCase = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
UpperCAmelCase = DebertaVaTokenizer(UpperCamelCase__ , do_lower_case=UpperCamelCase__ , split_by_punct=UpperCamelCase__ )
UpperCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase = DebertaVaTokenizerFast(UpperCamelCase__ , do_lower_case=UpperCamelCase__ , split_by_punct=UpperCamelCase__ )
UpperCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
UpperCAmelCase = "I was born in 92000, and this is falsé."
UpperCAmelCase = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
UpperCAmelCase = DebertaVaTokenizer(UpperCamelCase__ , do_lower_case=UpperCamelCase__ , split_by_punct=UpperCamelCase__ )
UpperCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase = DebertaVaTokenizerFast(UpperCamelCase__ , do_lower_case=UpperCamelCase__ , split_by_punct=UpperCamelCase__ )
UpperCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase = "I was born in 92000, and this is falsé."
UpperCAmelCase = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
UpperCAmelCase = DebertaVaTokenizer(UpperCamelCase__ , do_lower_case=UpperCamelCase__ , split_by_punct=UpperCamelCase__ )
UpperCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase = DebertaVaTokenizerFast(UpperCamelCase__ , do_lower_case=UpperCamelCase__ , split_by_punct=UpperCamelCase__ )
UpperCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase = " \tHeLLo!how \n Are yoU? "
UpperCAmelCase = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
# fmt: on
UpperCAmelCase = DebertaVaTokenizer(UpperCamelCase__ , do_lower_case=UpperCamelCase__ , split_by_punct=UpperCamelCase__ )
UpperCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase = DebertaVaTokenizerFast(UpperCamelCase__ , do_lower_case=UpperCamelCase__ , split_by_punct=UpperCamelCase__ )
UpperCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> int:
'''simple docstring'''
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = self.get_rust_tokenizer()
UpperCAmelCase = "I was born in 92000, and this is falsé."
UpperCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) )
UpperCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
UpperCAmelCase = rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase = self.get_rust_tokenizer()
UpperCAmelCase = tokenizer.encode(UpperCamelCase__ )
UpperCAmelCase = rust_tokenizer.encode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase = "This is a test"
UpperCAmelCase = [13, 1, 43_98, 25, 21, 12_89]
UpperCAmelCase = ["▁", "T", "his", "▁is", "▁a", "▁test"]
UpperCAmelCase = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]
UpperCAmelCase = DebertaVaTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ )
UpperCAmelCase = DebertaVaTokenizerFast(UpperCamelCase__ , keep_accents=UpperCamelCase__ )
UpperCAmelCase = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase = tokenizer.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase = tokenizer.convert_ids_to_tokens(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase = rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase = rust_tokenizer.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase = rust_tokenizer.convert_ids_to_tokens(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
# fmt: off
UpperCAmelCase = "I was born in 92000, and this is falsé."
UpperCAmelCase = [13, 1, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9]
UpperCAmelCase = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
UpperCAmelCase = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
UpperCAmelCase = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase = tokenizer.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase = tokenizer.convert_ids_to_tokens(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase = rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase = rust_tokenizer.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase = rust_tokenizer.convert_ids_to_tokens(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Dict:
'''simple docstring'''
UpperCAmelCase = DebertaVaTokenizer(UpperCamelCase__ )
UpperCAmelCase = tokenizer.encode("sequence builders" )
UpperCAmelCase = tokenizer.encode("multi-sequence build" )
UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ )
UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , UpperCamelCase__ )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , UpperCamelCase__ , )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase = {"input_ids": [[1, 3_98_67, 36, 1_93_90, 4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__ , model_name="microsoft/deberta-v2-xlarge" , revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" , )
| 323 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
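
# A quick sanity check of the filtering regex (hypothetical interactive session):
#   >>> re.compile(r"^(utils|src).*?\.py$").match("src/models/bert.py") is not None
#   True
#   >>> re.compile(r"^(utils|src).*?\.py$").match("docs/readme.md") is not None
#   False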
| 323 | 1 |
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 718 |
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """
    Undirected unweighted graph for running the Markov chain algorithm.
    """

    def __init__(self):
        self.connections = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(
        self, node1: str, node2: str, probability: float
    ) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(
    start: str, transitions: list[tuple[str, str, float]], steps: int
) -> dict[str, int]:
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited


if __name__ == "__main__":
    import doctest

    doctest.testmod()
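

# A minimal usage sketch (the transition list below is made up; the out-edge
# probabilities of each node should sum to 1 so `transition` always moves):
if __name__ == "__main__":
    example = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.4), ("b", "b", 0.6)]
    print(get_transitions("a", example, 5_000))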
| 594 | 0 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."


# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""


def blackify(code):
    """Applies the black part of our `make style` command to `code`."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs


def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_copies(args.fix_and_overwrite)
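
# For reference, a typical marker this script scans for looks like the line
# below (the object path is hypothetical):
#   # Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->MyBlock
# `_re_copy_warning` captures the leading indent, the dotted object path after
# the `diffusers.` prefix, and the optional "with A->B" replacement pattern that
# `is_copy_consistent` applies to the original code before comparing the blocks.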
| 17 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1],
            ),
            decoder_config=DetrConfig(
                decoder_ffn_dim=128,
                num_queries=self.num_queries,
                decoder_attention_heads=2,
                d_model=self.mask_feature_size,
            ),
            mask_feature_size=self.mask_feature_size,
            fpn_feature_size=self.mask_feature_size,
            num_channels=self.num_channels,
            num_labels=self.num_labels,
        )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : str , UpperCamelCase : Any ):
'''simple docstring'''
_snake_case : int = output.encoder_hidden_states
_snake_case : Tuple = output.pixel_decoder_hidden_states
_snake_case : Tuple = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(UpperCamelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(UpperCamelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(UpperCamelCase ) , config.decoder_config.decoder_layers )
def UpperCamelCase_ ( self : Any , UpperCamelCase : Tuple , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : str=False ):
'''simple docstring'''
with torch.no_grad():
_snake_case : str = MaskFormerModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
_snake_case : Tuple = model(pixel_values=UpperCamelCase , pixel_mask=UpperCamelCase )
_snake_case : str = model(UpperCamelCase , output_hidden_states=UpperCamelCase )
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : Dict , UpperCamelCase : Tuple ):
'''simple docstring'''
_snake_case : str = MaskFormerForInstanceSegmentation(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
def comm_check_on_output(UpperCamelCase : Optional[Any] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_snake_case : Tuple = model(pixel_values=UpperCamelCase , pixel_mask=UpperCamelCase )
_snake_case : Optional[Any] = model(UpperCamelCase )
comm_check_on_output(UpperCamelCase )
_snake_case : Union[str, Any] = model(
pixel_values=UpperCamelCase , pixel_mask=UpperCamelCase , mask_labels=UpperCamelCase , class_labels=UpperCamelCase )
comm_check_on_output(UpperCamelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(UpperCamelCase , **UpperCamelCase , output_hidden_states=UpperCamelCase )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*UpperCamelCase )
@unittest.skip(reason='MaskFormer does not use inputs_embeds' )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip(reason='MaskFormer does not have a get_input_embeddings method' )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
pass
@unittest.skip(reason='MaskFormer is not a generative model' )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
pass
@unittest.skip(reason='MaskFormer does not use token embeddings' )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Optional[int] = model_class(UpperCamelCase )
_snake_case : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : Optional[Any] = [*signature.parameters.keys()]
_snake_case : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCamelCase )
@slow
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
for model_name in ["facebook/maskformer-swin-small-coco"]:
_snake_case : int = MaskFormerModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Any = (self.model_tester.min_size,) * 2
_snake_case : Optional[int] = {
'pixel_values': torch.randn((2, 3, *size) , device=UpperCamelCase ),
'mask_labels': torch.randn((2, 10, *size) , device=UpperCamelCase ),
'class_labels': torch.zeros(2 , 10 , device=UpperCamelCase ).long(),
}
_snake_case : Optional[int] = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(UpperCamelCase )
_snake_case : Any = model(**UpperCamelCase )
self.assertTrue(outputs.loss is not None )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case , _snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(UpperCamelCase , **UpperCamelCase , output_hidden_states=UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Union[str, Any] = model_class(UpperCamelCase ).to(UpperCamelCase )
_snake_case : Dict = model(**UpperCamelCase , output_attentions=UpperCamelCase )
self.assertTrue(outputs.attentions is not None )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
_snake_case : Dict = self.all_model_classes[1]
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
_snake_case : int = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.train()
_snake_case : Optional[Any] = model(UpperCamelCase , mask_labels=UpperCamelCase , class_labels=UpperCamelCase ).loss
loss.backward()
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case : List[Any] = self.all_model_classes[1]
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case : str = self.model_tester.prepare_config_and_inputs()
_snake_case : List[str] = True
_snake_case : List[Any] = True
_snake_case : List[Any] = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.train()
_snake_case : Dict = model(UpperCamelCase , mask_labels=UpperCamelCase , class_labels=UpperCamelCase )
_snake_case : List[str] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_snake_case : Any = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
            # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
_snake_case : List[str] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_snake_case : List[str] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=UpperCamelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
TOLERANCE = 1e-4


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
            if is_vision_available()
            else None
        )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = MaskFormerModel.from_pretrained('facebook/maskformer-swin-small-coco' ).to(UpperCamelCase )
_snake_case : Dict = self.default_image_processor
_snake_case : Tuple = prepare_img()
_snake_case : str = image_processor(UpperCamelCase , return_tensors='pt' ).to(UpperCamelCase )
_snake_case : int = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(UpperCamelCase , (1, 3, 8_00, 10_88) )
with torch.no_grad():
_snake_case : Union[str, Any] = model(**UpperCamelCase )
_snake_case : Tuple = torch.tensor(
[[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(UpperCamelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , UpperCamelCase , atol=UpperCamelCase ) )
_snake_case : Optional[int] = torch.tensor(
[[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(UpperCamelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , UpperCamelCase , atol=UpperCamelCase ) )
_snake_case : Optional[int] = torch.tensor(
[[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(UpperCamelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , UpperCamelCase , atol=UpperCamelCase ) )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[int] = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' )
.to(UpperCamelCase )
.eval()
)
_snake_case : Any = self.default_image_processor
_snake_case : List[Any] = prepare_img()
_snake_case : List[str] = image_processor(UpperCamelCase , return_tensors='pt' ).to(UpperCamelCase )
_snake_case : List[Any] = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(UpperCamelCase , (1, 3, 8_00, 10_88) )
with torch.no_grad():
_snake_case : Optional[Any] = model(**UpperCamelCase )
# masks_queries_logits
_snake_case : Optional[int] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_snake_case : Any = [
[-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33],
[-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95],
[-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42],
]
_snake_case : str = torch.tensor(UpperCamelCase ).to(UpperCamelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCamelCase , atol=UpperCamelCase ) )
# class_queries_logits
_snake_case : List[Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_snake_case : Tuple = torch.tensor(
[
[1.6_5_1_2e0_0, -5.2_5_7_2e0_0, -3.3_5_1_9e0_0],
[3.6_1_6_9e-0_2, -5.9_0_2_5e0_0, -2.9_3_1_3e0_0],
[1.0_7_6_6e-0_4, -7.7_6_3_0e0_0, -5.1_2_6_3e0_0],
] ).to(UpperCamelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCamelCase , atol=UpperCamelCase ) )
    def test_inference_instance_segmentation_head_resnet_backbone(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))
        with torch.no_grad():
            outputs = model(**inputs)
        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_with_segmentation_maps_and_loss(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )
        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]
        with torch.no_grad():
            outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)
| 411 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
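# Usage sketch (illustrative, not part of the module): with the lazy structure above,
# importing the package stays cheap and the heavy torch/vision submodules are only
# loaded when one of the registered attributes is first accessed, e.g.
#   from transformers.models.yolos import YolosConfig               # config only
#   from transformers.models.yolos import YolosForObjectDetection   # triggers modeling import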
| 278 |
def is_palindrome(num) -> bool:
    return str(num) == str(num)[::-1]
def sum_reverse(num) -> int:
    return int(num) + int(str(num)[::-1])
def solution(limit: int = 10_000) -> int:
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)
if __name__ == "__main__":
print(F'''{solution() = }''')
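# Quick sanity check (runs on import): 47 reaches a palindrome in a single
# reverse-and-add step (47 + 74 = 121), so it is not a Lychrel candidate.
assert sum_reverse(47) == 121 and is_palindrome(121)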
| 278 | 1 |
'''simple docstring'''
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.15},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
a = "allenai"
def rewrite_dict_keys(d):
    # (1) remove the word-breaking symbol, (2) add a word-ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
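# Illustration (assuming the four special tokens are present, as in real fairseq dicts):
#   rewrite_dict_keys({"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le@@": 4, "er": 5})
#   == {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le": 4, "er</w>": 5}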
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")
    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)
    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f"""using checkpoint {checkpoint_file}""" )
__SCREAMING_SNAKE_CASE = hub_utils.from_pretrained(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , archive_map=__UpperCAmelCase , **__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = vars(chkpt["""args"""]["""model"""] )
__SCREAMING_SNAKE_CASE = args["""source_lang"""]
__SCREAMING_SNAKE_CASE = args["""target_lang"""]
__SCREAMING_SNAKE_CASE = dirname(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = basename(__UpperCAmelCase )
# dicts
__SCREAMING_SNAKE_CASE = os.path.join(__UpperCAmelCase , f"""dict.{src_lang}.txt""" )
__SCREAMING_SNAKE_CASE = os.path.join(__UpperCAmelCase , f"""dict.{tgt_lang}.txt""" )
__SCREAMING_SNAKE_CASE = Dictionary.load(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = rewrite_dict_keys(src_dict.indices )
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = os.path.join(__UpperCAmelCase , """vocab-src.json""" )
print(f"""Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records""" )
with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(__UpperCAmelCase , ensure_ascii=__UpperCAmelCase , indent=__UpperCAmelCase ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
__SCREAMING_SNAKE_CASE = True
for k in src_vocab.keys():
if not k.islower():
__SCREAMING_SNAKE_CASE = False
break
__SCREAMING_SNAKE_CASE = Dictionary.load(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = rewrite_dict_keys(tgt_dict.indices )
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = os.path.join(__UpperCAmelCase , """vocab-tgt.json""" )
print(f"""Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records""" )
with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(__UpperCAmelCase , ensure_ascii=__UpperCAmelCase , indent=__UpperCAmelCase ) )
    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)
    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f"""need to extend tokenizer to support bpe={args['bpe']}"""
assert args["tokenizer"] == "moses", f"""need to extend tokenizer to support bpe={args['tokenizer']}"""
    model_conf = {
        "architectures": ["FSMTForConditionalGeneration"],
        "model_type": "fsmt",
        "activation_dropout": args["activation_dropout"],
        "activation_function": "relu",
        "attention_dropout": args["attention_dropout"],
        "d_model": args["decoder_embed_dim"],
        "dropout": args["dropout"],
        "init_std": 0.02,
        "max_position_embeddings": args["max_source_positions"],
        "num_hidden_layers": args["encoder_layers"],
        "src_vocab_size": src_vocab_size,
        "tgt_vocab_size": tgt_vocab_size,
        "langs": [src_lang, tgt_lang],
        "encoder_attention_heads": args["encoder_attention_heads"],
        "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
        "encoder_layerdrop": args["encoder_layerdrop"],
        "encoder_layers": args["encoder_layers"],
        "decoder_attention_heads": args["decoder_attention_heads"],
        "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
        "decoder_layerdrop": args["decoder_layerdrop"],
        "decoder_layers": args["decoder_layers"],
        "bos_token_id": 0,
        "pad_token_id": 1,
        "eos_token_id": 2,
        "is_encoder_decoder": True,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_all_embeddings"],
    }
    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0
print(f"""Generating {fsmt_model_config_file}""" )
with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(__UpperCAmelCase , ensure_ascii=__UpperCAmelCase , indent=__UpperCAmelCase ) )
    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }
    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))
    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()
    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())
# remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)
    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)
    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)
    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)
print("""Conversion is done!""" )
print("""\nLast step is to upload the files to s3""" )
print(f"""cd {data_root}""" )
print(f"""transformers-cli upload {model_dir}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
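# Example invocation (hypothetical local paths; the checkpoint directory must also
# contain the fairseq dict.*.txt and bpecodes files downloaded with the model):
#   python convert_fsmt_original_pytorch_checkpoint_to_pytorch.py \
#       --fsmt_checkpoint_path data/wmt19-ru-en/model4.pt \
#       --pytorch_dump_folder_path data/wmt19-ru-en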
| 109 |
'''simple docstring'''
def prefix_function(input_string: str) -> list:
    """Compute the KMP prefix (failure) function of input_string."""
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result
def longest_prefix(input_str: str) -> int:
    """Length of the longest proper prefix that also occurs as a substring."""
    return max(prefix_function(input_str))
if __name__ == "__main__":
import doctest
doctest.testmod()
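# Worked example (runs on import): in "aabcdaabc" the suffix "aabc" matches the
# 4-character prefix, so the final prefix-function value - and the longest prefix - is 4.
assert prefix_function("aabcdaabc") == [0, 1, 0, 0, 0, 1, 2, 3, 4]
assert longest_prefix("aabcdaabc") == 4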
| 404 | 0 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)
    @mask_token.setter
    def mask_token(self, value):
        # the mask token behaves like a normal word, i.e. it includes the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))
        return encoded_inputs
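# Minimal usage sketch (assumes network access to the Hub checkpoint):
#   tok = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
#   enc = tok("A very long document ...", return_tensors="pt")
# LED models additionally expect a `global_attention_mask`; by convention the
# first (<s>) token is usually given global attention.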
| 717 |
'''simple docstring'''
def partition(m: int) -> int:
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1
    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
            n = int(input("Enter a number: ").strip())
print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
            n = int(sys.argv[1])
print(partition(n))
except ValueError:
print('''Please pass a number.''')
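# The memo table computes the partition number p(m), e.g.
# p(5) = 7: 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1.
assert partition(5) == 7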
| 420 | 0 |
import cmath
import math
def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    """Calculate the apparent power S = V * I from voltage and current given in polar form."""
    # convert angles from degrees to radians
    voltage_angle_rad = math.radians(voltage_angle)
    current_angle_rad = math.radians(current_angle)
    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle_rad)
    current_rect = cmath.rect(current, current_angle_rad)
    # Calculate apparent power
    return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
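# Worked example (runs on import): 100 V at 0 degrees times 5 A at 0 degrees
# gives a purely real apparent power of 500 VA.
assert apparent_power(100, 5, 0, 0) == 500 + 0j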
| 167 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""
    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None
class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig
    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)
            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
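# Usage sketch (assumed entry point): this builder backs the packaged "json" loader,
#   from datasets import load_dataset
#   ds = load_dataset("json", data_files="data.jsonl")
# and `field="rows"` selects a nested list inside a single wrapping JSON object.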
| 167 | 1 |
from typing import Union
import fire
import torch
from tqdm import tqdm
def lowerCamelCase__ ( _lowercase , _lowercase = "cpu" , _lowercase = None ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = torch.load(_lowercase , map_location=_lowercase )
for k, v in tqdm(state_dict.items() ):
if not isinstance(_lowercase , torch.Tensor ):
raise TypeError('''FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin''' )
UpperCAmelCase_ : int = v.half()
if save_path is None: # overwrite src_path
UpperCAmelCase_ : Optional[Any] = src_path
torch.save(_lowercase , _lowercase )
if __name__ == "__main__":
fire.Fire(convert)
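# Example CLI usage through fire (hypothetical file names):
#   python convert_model_to_fp16.py pytorch_model.bin --save_path pytorch_model.fp16.bin
# Omitting --save_path converts the checkpoint in place.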
| 700 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    """simple docstring"""
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
        choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            bos_token_id=self.bos_token_id,
        )
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = TFFlaubertModel(config=config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_flaubert_for_token_classification(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_flaubert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False
    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)
    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)
    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)
    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    """simple docstring"""
    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")
        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"
        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
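# The slow integration check above is skipped by default; a typical way to run it
# (assumed test file location, requires network access for the checkpoint):
#   RUN_SLOW=1 python -m pytest tests/models/flaubert/test_modeling_tf_flaubert.py -k Integration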
| 300 | 0 |
"""simple docstring"""
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class DiffieHellman:
    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]
        self.__private_key = int(hexlify(urandom(32)), base=16)
    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]
    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]
    def is_valid_public_key(self, key: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )
    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()
    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )
    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
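# Usage sketch: two parties on the same MODP group exchange hex-encoded public keys
# and derive an identical SHA-256 shared secret (runs on import, no network needed).
alice = DiffieHellman(group=14)
bob = DiffieHellman(group=14)
assert alice.generate_shared_key(bob.generate_public_key()) == bob.generate_shared_key(
    alice.generate_public_key()
)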
| 510 |
"""simple docstring"""
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return the gross price after applying a tax rate to the net price."""
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(f"""{price_plus_tax(100, 0.25) = }""")
print(f"""{price_plus_tax(1_25.50, 0.05) = }""")
| 510 | 1 |
def present_value(discount_rate: float, cash_flows: list[float]) -> float:
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
if __name__ == "__main__":
import doctest
doctest.testmod()
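# Worked example (runs on import): a 13% discount rate over the cash flows
# [10, 20.70, -293, 297] nets out to about 4.69.
assert present_value(0.13, [10, 20.70, -293, 297]) == 4.69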
| 719 |
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()
def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]
def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 354 | 0 |
'''simple docstring'''
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    # adapted from the fairseq RoBERTa hub interface
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs
tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()
masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 329 |
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 329 | 1 |
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline(DiffusionPipeline):
    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
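# Hedged usage sketch (commented out; the model id below is an assumption taken
# from public diffusers examples, not something defined in this file):
# pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
# image = pipe(num_inference_steps=50).images[0]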
| 718 |
"""simple docstring"""
def solution(limit: int = 28123) -> int:
    """
    Sum of all positive integers that cannot be written as the sum of two
    abundant numbers (Project Euler problem 23).
    """
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a in abundants) for a in abundants):
            res += n

    return res
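
# Hedged spot check (not part of the original file): 24 is the smallest number
# expressible as a sum of two abundant numbers (12 + 12), so with limit=23
# every integer from 1 to 23 is counted: 23 * 24 / 2 == 276.
assert solution(23) == 276
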
if __name__ == "__main__":
print(solution())
| 100 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )


class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )

        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
| 627 |
'''simple docstring'''
import math
import sys

import cv2
import numpy as np


def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # Apply the gaussian function to each element of the matrix.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Create a gaussian kernel of the given dimension.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)


def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            img2[i, j] = val
    return img2


def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
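
# Tiny hedged sanity check (not in the original file): a 5x5 spatial kernel
# should be gaussian-shaped and peak at its centre, index 12 of the flattened
# 5x5 grid, since the distance matrix is zero only there.
_kernel = get_gauss_kernel(5, 1.0)
assert _kernel.shape == (5, 5)
assert _kernel.argmax() == 12
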
if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow("input image", img)

    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow("output image", out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
| 627 | 1 |
def ugly_numbers(n: int) -> int:
    """Returns the nth ugly number (numbers whose only prime factors are 2, 3 and 5)."""
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
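
# Hedged hand-checkable examples (not part of the original script): the ugly
# number sequence starts 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, so the first term is
# 1 and the tenth is 12.
assert ugly_numbers(1) == 1
assert ugly_numbers(10) == 12
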
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'''{ugly_numbers(2_0_0) = }''')
| 185 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_resnet': ['RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ResNetConfig', 'ResNetOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
'RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'ResNetForImageClassification',
'ResNetModel',
'ResNetPreTrainedModel',
'ResNetBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
'TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFResNetForImageClassification',
'TFResNetModel',
'TFResNetPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
'FlaxResNetForImageClassification',
'FlaxResNetModel',
'FlaxResNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 185 | 1 |
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    """Return True if ``n`` reads the same forwards and backwards."""
    n = str(n)
    return n == n[::-1]


def solution(n: int = 1000000) -> int:
    """Return the sum of all numbers below ``n`` that are palindromic in base 10 and base 2."""
    total = 0

    for i in range(1, n):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
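
# Hedged spot check (not in the original file): below 10, the numbers that are
# palindromic in both bases are 1 (1), 3 (11), 5 (101), 7 (111) and 9 (1001),
# so solution(10) == 1 + 3 + 5 + 7 + 9 == 25.
assert solution(10) == 25
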
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 33 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return model

    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            cross_attention_dim=10,
        )
        return model

    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64),
            in_channels=1,
            out_channels=1,
            latent_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
        )
        unet = UNet2DModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return vqvae, unet

    @slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1],
            y_res=self.dummy_unet.config.sample_size[0],
        )
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1],
            y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0],
        )
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0


@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
| 683 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
    "tokenization_roc_bert": ["RoCBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roc_bert"] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 97 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    """Feature for translations with fixed languages per example."""

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the Translation feature into a dictionary."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    """Feature for translations with variable languages per example."""

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(lang_set)})."
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the TranslationVariableLanguages feature into a dictionary."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
| 97 | 1 |
def merge_sort(collection: list) -> list:
    """Pure Python implementation of the merge sort algorithm."""

    def merge(left: list, right: list) -> list:
        """Merge two sorted lists into a single sorted list."""

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
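
# Hedged sanity checks (not in the original script): merge sort is a stable
# O(n log n) comparison sort, so these tiny inputs are easy to verify by hand.
assert merge_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]
assert merge_sort([]) == []
assert merge_sort([-2, -5, -45]) == [-45, -5, -2]
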
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
| 15 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_DESCRIPTION = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
_KWARGS_DESCRIPTION = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ChrF(datasets.Metric):
    def _info(self):
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/mjpost/sacreBLEU#chrf--chrf""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#chrf--chrf"""] , reference_urls=[
"""https://github.com/m-popovic/chrF""",
] , )
    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)

        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
| 15 | 1 |
"""simple docstring"""
def solution(length: int = 50) -> int:
    """
    Count the ways a row of ``length`` units can be filled with red blocks of
    minimum length three, separated by at least one black square
    (Project Euler problem 114).
    """
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]
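
# Hedged hand-checkable value (not part of the original file): a row of seven
# units admits 17 valid fillings, the example given in the Project Euler 114
# problem statement.
assert solution(7) == 17
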
if __name__ == "__main__":
print(f"""{solution() = }""")
| 703 |
"""simple docstring"""
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
'''simple docstring'''
if num < 2:
raise ValueError('The input value cannot be less than 2' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus
    for _ in range(attempts):
# These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
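
# Two deterministic spot checks (hedged additions, not in the original file):
# 4 is caught by the even-input shortcut, and for 9 the default seed/step give
# tortoise=5, hare=8 on the first lap, so gcd(8 - 5, 9) == 3 is returned.
assert pollard_rho(4) == 2
assert pollard_rho(9) == 3
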
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
"num",
type=int,
help="The value to find a divisor of",
)
parser.add_argument(
"--attempts",
type=int,
default=3,
help="The number of attempts before giving up",
)
    args = parser.parse_args()

    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f"""{args.num} is probably prime""")
else:
        quotient = args.num // divisor
print(f"""{args.num} = {divisor} * {quotient}""")
| 659 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class BartphoTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
| 50 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase : Tuple = logging.get_logger(__name__)
def A__ ( __lowerCAmelCase : int ):
lowerCamelCase__ = DPTConfig(embedding_type="""hybrid""" )
if "large" in checkpoint_url:
lowerCamelCase__ = 1024
lowerCamelCase__ = 4096
lowerCamelCase__ = 24
lowerCamelCase__ = 16
lowerCamelCase__ = [5, 11, 17, 23]
lowerCamelCase__ = [256, 512, 1024, 1024]
lowerCamelCase__ = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
lowerCamelCase__ = 768
lowerCamelCase__ = [1, 1, 1, 0.5]
lowerCamelCase__ = [256, 512, 768, 768]
lowerCamelCase__ = 150
lowerCamelCase__ = 16
lowerCamelCase__ = (1, 384, 384)
lowerCamelCase__ = False
lowerCamelCase__ = """project"""
if "ade" in checkpoint_url:
lowerCamelCase__ = True
lowerCamelCase__ = 768
lowerCamelCase__ = [1, 1, 1, 0.5]
lowerCamelCase__ = 150
lowerCamelCase__ = 16
lowerCamelCase__ = """huggingface/label-files"""
lowerCamelCase__ = """ade20k-id2label.json"""
lowerCamelCase__ = json.load(open(cached_download(hf_hub_url(__lowerCAmelCase , __lowerCAmelCase , repo_type="""dataset""" ) ) , """r""" ) )
lowerCamelCase__ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
lowerCamelCase__ = idalabel
lowerCamelCase__ = {v: k for k, v in idalabel.items()}
lowerCamelCase__ = [1, 150, 480, 480]
return config, expected_shape
def A__ ( __lowerCAmelCase : Optional[int] ):
lowerCamelCase__ = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
for k in ignore_keys:
state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
def A__ ( __lowerCAmelCase : List[Any] ):
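    # Translate original DPT/timm parameter names into the HF DPT naming scheme.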
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
lowerCamelCase__ = name.replace("""pretrained.model""" , """dpt.encoder""" )
if "pretrained.model" in name:
lowerCamelCase__ = name.replace("""pretrained.model""" , """dpt.embeddings""" )
if "patch_embed" in name:
lowerCamelCase__ = name.replace("""patch_embed""" , """""" )
if "pos_embed" in name:
lowerCamelCase__ = name.replace("""pos_embed""" , """position_embeddings""" )
if "attn.proj" in name:
lowerCamelCase__ = name.replace("""attn.proj""" , """attention.output.dense""" )
if "proj" in name and "project" not in name:
lowerCamelCase__ = name.replace("""proj""" , """projection""" )
if "blocks" in name:
lowerCamelCase__ = name.replace("""blocks""" , """layer""" )
if "mlp.fc1" in name:
lowerCamelCase__ = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
lowerCamelCase__ = name.replace("""mlp.fc2""" , """output.dense""" )
if "norm1" in name and "backbone" not in name:
lowerCamelCase__ = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name and "backbone" not in name:
lowerCamelCase__ = name.replace("""norm2""" , """layernorm_after""" )
if "scratch.output_conv" in name:
lowerCamelCase__ = name.replace("""scratch.output_conv""" , """head""" )
if "scratch" in name:
lowerCamelCase__ = name.replace("""scratch""" , """neck""" )
if "layer1_rn" in name:
lowerCamelCase__ = name.replace("""layer1_rn""" , """convs.0""" )
if "layer2_rn" in name:
lowerCamelCase__ = name.replace("""layer2_rn""" , """convs.1""" )
if "layer3_rn" in name:
lowerCamelCase__ = name.replace("""layer3_rn""" , """convs.2""" )
if "layer4_rn" in name:
lowerCamelCase__ = name.replace("""layer4_rn""" , """convs.3""" )
if "refinenet" in name:
lowerCamelCase__ = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
lowerCamelCase__ = name.replace(F'''refinenet{layer_idx}''' , F'''fusion_stage.layers.{abs(layer_idx-4 )}''' )
if "out_conv" in name:
lowerCamelCase__ = name.replace("""out_conv""" , """projection""" )
if "resConfUnit1" in name:
lowerCamelCase__ = name.replace("""resConfUnit1""" , """residual_layer1""" )
if "resConfUnit2" in name:
lowerCamelCase__ = name.replace("""resConfUnit2""" , """residual_layer2""" )
if "conv1" in name:
lowerCamelCase__ = name.replace("""conv1""" , """convolution1""" )
if "conv2" in name:
lowerCamelCase__ = name.replace("""conv2""" , """convolution2""" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" )
if "pretrained.act_postprocess2.0.project.0" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" )
if "pretrained.act_postprocess3.0.project.0" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" )
if "pretrained.act_postprocess4.0.project.0" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" )
if "pretrained.act_postprocess1.4" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" )
if "pretrained.act_postprocess2.3" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" )
if "pretrained.act_postprocess2.4" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" )
if "pretrained.act_postprocess3.3" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" )
if "pretrained.act_postprocess4.3" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" )
if "pretrained.act_postprocess4.4" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" )
if "pretrained" in name:
lowerCamelCase__ = name.replace("""pretrained""" , """dpt""" )
if "bn" in name:
lowerCamelCase__ = name.replace("""bn""" , """batch_norm""" )
if "head" in name:
lowerCamelCase__ = name.replace("""head""" , """head.head""" )
if "encoder.norm" in name:
lowerCamelCase__ = name.replace("""encoder.norm""" , """layernorm""" )
if "auxlayer" in name:
lowerCamelCase__ = name.replace("""auxlayer""" , """auxiliary_head.head""" )
if "backbone" in name:
lowerCamelCase__ = name.replace("""backbone""" , """backbone.bit.encoder""" )
if ".." in name:
lowerCamelCase__ = name.replace("""..""" , """.""" )
if "stem.conv" in name:
lowerCamelCase__ = name.replace("""stem.conv""" , """bit.embedder.convolution""" )
if "blocks" in name:
lowerCamelCase__ = name.replace("""blocks""" , """layers""" )
if "convolution" in name and "backbone" in name:
lowerCamelCase__ = name.replace("""convolution""" , """conv""" )
if "layer" in name and "backbone" in name:
lowerCamelCase__ = name.replace("""layer""" , """layers""" )
if "backbone.bit.encoder.bit" in name:
lowerCamelCase__ = name.replace("""backbone.bit.encoder.bit""" , """backbone.bit""" )
if "embedder.conv" in name:
lowerCamelCase__ = name.replace("""embedder.conv""" , """embedder.convolution""" )
if "backbone.bit.encoder.stem.norm" in name:
lowerCamelCase__ = name.replace("""backbone.bit.encoder.stem.norm""" , """backbone.bit.embedder.norm""" )
return name
def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : int ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase__ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.weight''' )
lowerCamelCase__ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase__ = in_proj_weight[: config.hidden_size, :]
lowerCamelCase__ = in_proj_bias[: config.hidden_size]
lowerCamelCase__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase__ = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase__ = in_proj_bias[-config.hidden_size :]
def A__ ( ):
lowerCamelCase__ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCamelCase__ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any ):
lowerCamelCase__ , lowerCamelCase__ = get_dpt_config(__lowerCAmelCase )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" )
# remove certain keys
remove_ignore_keys_(__lowerCAmelCase )
# rename keys
for key in state_dict.copy().keys():
lowerCamelCase__ = state_dict.pop(__lowerCAmelCase )
lowerCamelCase__ = val
# read in qkv matrices
read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase )
# load HuggingFace model
lowerCamelCase__ = DPTForSemanticSegmentation(__lowerCAmelCase ) if """ade""" in checkpoint_url else DPTForDepthEstimation(__lowerCAmelCase )
model.load_state_dict(__lowerCAmelCase )
model.eval()
# Check outputs on an image
lowerCamelCase__ = 480 if """ade""" in checkpoint_url else 384
lowerCamelCase__ = DPTImageProcessor(size=__lowerCAmelCase )
lowerCamelCase__ = prepare_img()
lowerCamelCase__ = image_processor(__lowerCAmelCase , return_tensors="""pt""" )
# forward pass
lowerCamelCase__ = model(**__lowerCAmelCase ).logits if """ade""" in checkpoint_url else model(**__lowerCAmelCase ).predicted_depth
if show_prediction:
lowerCamelCase__ = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="""bicubic""" , align_corners=__lowerCAmelCase , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(__lowerCAmelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__lowerCAmelCase )
if push_to_hub:
model.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
image_processor.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
if __name__ == "__main__":
UpperCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
parser.add_argument(
'--show_prediction',
action='store_true',
)
UpperCamelCase : List[str] = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 50 | 1 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class __lowercase ( _lowercase ):
lowerCamelCase : jnp.ndarray
@flax_register_to_config
class __lowercase ( nn.Module , _lowercase , _lowercase ):
lowerCamelCase : int = 32
lowerCamelCase : int = 4
lowerCamelCase : int = 4
lowerCamelCase : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
lowerCamelCase : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
lowerCamelCase : Union[bool, Tuple[bool]] = False
lowerCamelCase : Tuple[int] = (320, 640, 1280, 1280)
lowerCamelCase : int = 2
lowerCamelCase : Union[int, Tuple[int]] = 8
lowerCamelCase : Optional[Union[int, Tuple[int]]] = None
lowerCamelCase : int = 1280
lowerCamelCase : float = 0.0
lowerCamelCase : bool = False
lowerCamelCase : jnp.dtype = jnp.floataa
lowerCamelCase : bool = True
lowerCamelCase : int = 0
lowerCamelCase : bool = False
def UpperCAmelCase__ (self , A ):
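        # Trace the module once on dummy (zeroed) inputs to materialize all parameters.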
# init input tensors
lowerCamelCase_ : Optional[int] = (1, self.in_channels, self.sample_size, self.sample_size)
lowerCamelCase_ : Dict = jnp.zeros(A , dtype=jnp.floataa )
lowerCamelCase_ : Optional[int] = jnp.ones((1,) , dtype=jnp.intaa )
lowerCamelCase_ : Optional[Any] = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
lowerCamelCase_ : Any = jax.random.split(A )
lowerCamelCase_ : Optional[Any] = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(A , A , A , A )["params"]
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = self.block_out_channels
lowerCamelCase_ : Tuple = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
lowerCamelCase_ : Dict = self.num_attention_heads or self.attention_head_dim
# input
lowerCamelCase_ : Any = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
lowerCamelCase_ : Dict = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
lowerCamelCase_ : Tuple = FlaxTimestepEmbedding(A , dtype=self.dtype )
lowerCamelCase_ : str = self.only_cross_attention
if isinstance(A , A ):
lowerCamelCase_ : List[str] = (only_cross_attention,) * len(self.down_block_types )
if isinstance(A , A ):
lowerCamelCase_ : List[str] = (num_attention_heads,) * len(self.down_block_types )
# down
lowerCamelCase_ : Dict = []
lowerCamelCase_ : int = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
lowerCamelCase_ : Tuple = output_channel
lowerCamelCase_ : List[str] = block_out_channels[i]
lowerCamelCase_ : Tuple = i == len(A ) - 1
if down_block_type == "CrossAttnDownBlock2D":
lowerCamelCase_ : int = FlaxCrossAttnDownBlockaD(
in_channels=A , out_channels=A , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
lowerCamelCase_ : List[str] = FlaxDownBlockaD(
in_channels=A , out_channels=A , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(A )
lowerCamelCase_ : List[Any] = down_blocks
# mid
lowerCamelCase_ : int = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
lowerCamelCase_ : List[str] = []
lowerCamelCase_ : Tuple = list(reversed(A ) )
lowerCamelCase_ : Union[str, Any] = list(reversed(A ) )
lowerCamelCase_ : Any = list(reversed(A ) )
lowerCamelCase_ : Union[str, Any] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
lowerCamelCase_ : Tuple = output_channel
lowerCamelCase_ : Optional[Any] = reversed_block_out_channels[i]
lowerCamelCase_ : Optional[int] = reversed_block_out_channels[min(i + 1 , len(A ) - 1 )]
lowerCamelCase_ : Union[str, Any] = i == len(A ) - 1
if up_block_type == "CrossAttnUpBlock2D":
lowerCamelCase_ : Any = FlaxCrossAttnUpBlockaD(
in_channels=A , out_channels=A , prev_output_channel=A , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
lowerCamelCase_ : Dict = FlaxUpBlockaD(
in_channels=A , out_channels=A , prev_output_channel=A , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(A )
lowerCamelCase_ : List[Any] = output_channel
lowerCamelCase_ : Tuple = up_blocks
# out
lowerCamelCase_ : Dict = nn.GroupNorm(num_groups=3_2 , epsilon=1E-5 )
lowerCamelCase_ : Optional[int] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__(self , A , A , A , A=None , A=None , A = True , A = False , ):
# 1. time
if not isinstance(A , jnp.ndarray ):
lowerCamelCase_ : Dict = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(A , jnp.ndarray ) and len(timesteps.shape ) == 0:
lowerCamelCase_ : Optional[int] = timesteps.astype(dtype=jnp.floataa )
lowerCamelCase_ : Any = jnp.expand_dims(A , 0 )
lowerCamelCase_ : Optional[Any] = self.time_proj(A )
lowerCamelCase_ : List[str] = self.time_embedding(A )
# 2. pre-process
lowerCamelCase_ : List[Any] = jnp.transpose(A , (0, 2, 3, 1) )
lowerCamelCase_ : List[str] = self.conv_in(A )
# 3. down
lowerCamelCase_ : int = (sample,)
for down_block in self.down_blocks:
if isinstance(A , A ):
lowerCamelCase_ : Tuple = down_block(A , A , A , deterministic=not train )
else:
lowerCamelCase_ : Tuple = down_block(A , A , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
lowerCamelCase_ : Optional[Any] = ()
for down_block_res_sample, down_block_additional_residual in zip(
A , A ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
lowerCamelCase_ : Union[str, Any] = new_down_block_res_samples
# 4. mid
lowerCamelCase_ : Tuple = self.mid_block(A , A , A , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
lowerCamelCase_ : Optional[int] = down_block_res_samples[-(self.layers_per_block + 1) :]
lowerCamelCase_ : Any = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(A , A ):
lowerCamelCase_ : List[str] = up_block(
A , temb=A , encoder_hidden_states=A , res_hidden_states_tuple=A , deterministic=not train , )
else:
lowerCamelCase_ : Dict = up_block(A , temb=A , res_hidden_states_tuple=A , deterministic=not train )
# 6. post-process
lowerCamelCase_ : str = self.conv_norm_out(A )
lowerCamelCase_ : List[str] = nn.silu(A )
lowerCamelCase_ : List[str] = self.conv_out(A )
lowerCamelCase_ : Any = jnp.transpose(A , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=A )
| 711 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__lowercase : int = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Any = ['''BartphoTokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
__lowercase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 357 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
a_ : int = None
a_ : List[Any] = logging.get_logger(__name__)
a_ : List[Any] = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}
a_ : Optional[int] = {
"vocab_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
},
"tokenizer_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
},
}
a_ : Union[str, Any] = {
"google/rembert": 256,
}
a_ : List[Any] = "▁"
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase =VOCAB_FILES_NAMES
__UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase =RemBertTokenizer
def __init__( self : int , snake_case__ : Any=None , snake_case__ : Optional[Any]=None , snake_case__ : Optional[int]=True , snake_case__ : Any=True , snake_case__ : Any=False , snake_case__ : Dict="[CLS]" , snake_case__ : List[str]="[SEP]" , snake_case__ : List[str]="<unk>" , snake_case__ : Union[str, Any]="[SEP]" , snake_case__ : List[Any]="<pad>" , snake_case__ : Tuple="[CLS]" , snake_case__ : str="[MASK]" , **snake_case__ : Union[str, Any] , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token
super().__init__(
snake_case__ , tokenizer_file=snake_case__ , do_lower_case=snake_case__ , remove_space=snake_case__ , keep_accents=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , **snake_case__ , )
SCREAMING_SNAKE_CASE = do_lower_case
SCREAMING_SNAKE_CASE = remove_space
SCREAMING_SNAKE_CASE = keep_accents
SCREAMING_SNAKE_CASE = vocab_file
        SCREAMING_SNAKE_CASE = bool(self.vocab_file )
def UpperCamelCase ( self : Tuple , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase ( self : Tuple , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) + [1]
return [1] + ([0] * len(snake_case__ )) + [1]
def UpperCamelCase ( self : int , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase ( self : Dict , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(snake_case__ ):
logger.error('Vocabulary path ({}) should be a directory'.format(snake_case__ ) )
return
SCREAMING_SNAKE_CASE = os.path.join(
snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ):
copyfile(self.vocab_file , snake_case__ )
return (out_vocab_file,)
| 439 |
import string
def __lowerCAmelCase ( _UpperCamelCase : str ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ''
for i in sequence:
SCREAMING_SNAKE_CASE = ord(_UpperCamelCase )
if 65 <= extract <= 90:
output += chr(1_55 - extract )
elif 97 <= extract <= 1_22:
output += chr(2_19 - extract )
else:
output += i
return output
def __lowerCAmelCase ( _UpperCamelCase : str ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = string.ascii_letters
SCREAMING_SNAKE_CASE = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
return "".join(
letters_reversed[letters.index(_UpperCamelCase )] if c in letters else c for c in sequence )
def __lowerCAmelCase ( ) -> None:
'''simple docstring'''
from timeit import timeit
print('Running performance benchmarks...' )
SCREAMING_SNAKE_CASE = 'from string import printable ; from __main__ import atbash, atbash_slow'
print(f"""> atbash_slow(): {timeit('atbash_slow(printable)' , setup=_UpperCamelCase )} seconds""" )
print(f"""> atbash(): {timeit('atbash(printable)' , setup=_UpperCamelCase )} seconds""" )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(F"""{example} encrypted in atbash: {atbash(example)}""")
benchmark()
| 439 | 1 |
"""simple docstring"""
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='%(message)s')
def A__ ( UpperCamelCase ):
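    # Reshape a flat array into a column vector of shape (n, 1).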
return input_array.reshape((input_array.size, 1) )
def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ):
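    # Within-class scatter, normalized by the total sample count:
    # S_W = (1 / N) * sum_i sum_{x in class i} (x - mu_i) (x - mu_i)^T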
A = np.nan
for i in range(UpperCamelCase ):
A = features[:, labels == i]
A = data.mean(1 )
# Centralize the data of class i
A = data - column_reshape(UpperCamelCase )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(UpperCamelCase , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
A = np.dot(UpperCamelCase , centered_data.T )
return covariance_sum / features.shape[1]
def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ):
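    # Between-class scatter, weighted by class sizes and normalized by N:
    # S_B = (1 / N) * sum_i n_i * (mu_i - mu) (mu_i - mu)^T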
A = features.mean(1 )
A = np.nan
for i in range(UpperCamelCase ):
A = features[:, labels == i]
A = data.shape[1]
A = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(UpperCamelCase ) - column_reshape(UpperCamelCase ) , (column_reshape(UpperCamelCase ) - column_reshape(UpperCamelCase )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
A = device_data * np.dot(
column_reshape(UpperCamelCase ) - column_reshape(UpperCamelCase ) , (column_reshape(UpperCamelCase ) - column_reshape(UpperCamelCase )).T , )
return covariance_sum / features.shape[1]
def A__ ( UpperCamelCase , UpperCamelCase ):
# Check if the features have been loaded
if features.any():
A = features.mean(1 )
# Center the dataset
A = features - np.reshape(UpperCamelCase , (data_mean.size, 1) )
A = np.dot(UpperCamelCase , centered_data.T ) / features.shape[1]
A, A = np.linalg.eigh(UpperCamelCase )
# Take all the columns in the reverse order (-1), and then takes only the first
A = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
A = np.dot(filtered_eigenvectors.T , UpperCamelCase )
logging.info("Principal Component Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=UpperCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
assert classes > dimensions
# Check if features have been already loaded
    if features.any():
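        # LDA directions come from the generalized eigenproblem S_B v = lambda * S_W v.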
A, A = eigh(
covariance_between_classes(UpperCamelCase , UpperCamelCase , UpperCamelCase ) , covariance_within_classes(UpperCamelCase , UpperCamelCase , UpperCamelCase ) , )
A = eigenvectors[:, ::-1][:, :dimensions]
A, A, A = np.linalg.svd(UpperCamelCase )
A = svd_matrix[:, 0:dimensions]
A = np.dot(filtered_svd_matrix.T , UpperCamelCase )
logging.info("Linear Discriminant Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=UpperCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def A__ ( ):
# Create dummy dataset with 2 classes and 3 features
A = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
A = np.array([0, 0, 0, 1, 1] )
A = 2
A = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(UpperCamelCase ) as error_info:
A = linear_discriminant_analysis(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
if isinstance(UpperCamelCase , np.ndarray ):
raise AssertionError(
"Did not raise AssertionError for dimensions > classes" )
assert error_info.type is AssertionError
def A__ ( ):
A = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
A = 2
A = np.array([[6.92_82_03_23, 8.66_02_54_04, 10.39_23_04_85], [3.0, 3.0, 3.0]] )
with pytest.raises(UpperCamelCase ) as error_info:
A = principal_component_analysis(UpperCamelCase , UpperCamelCase )
if not np.allclose(UpperCamelCase , UpperCamelCase ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 524 |
"""simple docstring"""
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def A__ ( UpperCamelCase ): # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def A__ ( ):
with parallel_backend("spark" ):
assert ParallelBackendConfig.backend_name == "spark"
A = [1, 2, 3]
with pytest.raises(UpperCamelCase ):
with parallel_backend("unsupported backend" ):
map_nested(UpperCamelCase , UpperCamelCase , num_proc=2 )
with pytest.raises(UpperCamelCase ):
with parallel_backend("unsupported backend" ):
map_nested(UpperCamelCase , UpperCamelCase , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" , [2, -1] )
def A__ ( UpperCamelCase ):
A = [1, 2]
A = {"a": 1, "b": 2}
A = {"a": [1, 2], "b": [3, 4]}
A = {"a": {"1": 1}, "b": 2}
A = {"a": 1, "b": 2, "c": 3, "d": 4}
A = [2, 3]
A = {"a": 2, "b": 3}
A = {"a": [2, 3], "b": [4, 5]}
A = {"a": {"1": 2}, "b": 3}
A = {"a": 2, "b": 3, "c": 4, "d": 5}
with parallel_backend("spark" ):
assert map_nested(UpperCamelCase , UpperCamelCase , num_proc=UpperCamelCase ) == expected_map_nested_sa
assert map_nested(UpperCamelCase , UpperCamelCase , num_proc=UpperCamelCase ) == expected_map_nested_sa
assert map_nested(UpperCamelCase , UpperCamelCase , num_proc=UpperCamelCase ) == expected_map_nested_sa
assert map_nested(UpperCamelCase , UpperCamelCase , num_proc=UpperCamelCase ) == expected_map_nested_sa
assert map_nested(UpperCamelCase , UpperCamelCase , num_proc=UpperCamelCase ) == expected_map_nested_sa
| 524 | 1 |
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class lowercase ( unittest.TestCase , a_ ):
"""simple docstring"""
def __UpperCAmelCase ( self : Any ):
'''simple docstring'''
_snake_case : int = load_tool('text-to-speech' )
self.tool.setup()
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : str = self.tool('hey' )
_snake_case : Any = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.000_5966_6688_3211_5829, -0.000_3657_6401_9079_5064, -0.0001_3439_5027_9988_3485] ) , ) )
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
torch.manual_seed(0 )
        _snake_case : Tuple = self.tool(text='hey' )
_snake_case : str = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.000_5966_6688_3211_5829, -0.000_3657_6401_9079_5064, -0.0001_3439_5027_9988_3485] ) , ) )
| 304 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
lowercase_ : Union[str, Any] = logging.get_logger(__name__)
lowercase_ : Dict = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowercase_ : Union[str, Any] = {
'''vocab_file''': {
'''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'''
},
'''merges_file''': {
'''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'''
},
}
lowercase_ : List[str] = {'''allegro/herbert-base-cased''': 514}
lowercase_ : Union[str, Any] = {}
class lowercase ( a_ ):
"""simple docstring"""
_UpperCamelCase : Tuple = VOCAB_FILES_NAMES
_UpperCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Any = PRETRAINED_INIT_CONFIGURATION
_UpperCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Union[str, Any] = HerbertTokenizer
def __init__( self : int , lowerCamelCase_ : int=None , lowerCamelCase_ : Any=None , lowerCamelCase_ : Dict=None , lowerCamelCase_ : Dict="<s>" , lowerCamelCase_ : str="<unk>" , lowerCamelCase_ : Dict="<pad>" , lowerCamelCase_ : Dict="<mask>" , lowerCamelCase_ : Optional[Any]="</s>" , **lowerCamelCase_ : List[Any] , ):
'''simple docstring'''
super().__init__(
lowerCamelCase_ , lowerCamelCase_ , tokenizer_file=lowerCamelCase_ , cls_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , **lowerCamelCase_ , )
def __UpperCAmelCase ( self : Any , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : List[Any] = [self.cls_token_id]
_snake_case : str = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None , lowerCamelCase_ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase_ )) + [1]
return [1] + ([0] * len(lowerCamelCase_ )) + [1] + ([0] * len(lowerCamelCase_ )) + [1]
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : List[str] = [self.sep_token_id]
_snake_case : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCAmelCase ( self : Any , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None ):
'''simple docstring'''
_snake_case : Union[str, Any] = self._tokenizer.model.save(lowerCamelCase_ , name=lowerCamelCase_ )
return tuple(lowerCamelCase_ )
| 304 | 1 |
from math import factorial
__snake_case :List[Any] = {str(d): factorial(d) for d in range(10)}
def __snake_case ( _UpperCAmelCase ):
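    # Sum of the factorials of the digits of n, e.g. 145 -> 1! + 4! + 5! = 145.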
return sum(DIGIT_FACTORIAL[d] for d in str(_UpperCAmelCase ) )
def __snake_case ( ):
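    # Upper bound: a number with d >= 8 digits is at least 10**7, yet its
    # digit-factorial sum is at most d * 9! = d * 362880, which is smaller;
    # so 7 * 9! + 1 (= 2540161) safely bounds the search.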
__a = 7 * factorial(9 ) + 1
return sum(i for i in range(3 , _UpperCAmelCase ) if sum_of_digit_factorial(_UpperCAmelCase ) == i )
if __name__ == "__main__":
print(f'{solution() = }')
| 60 |
__snake_case :str = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
    # BFS on the residual graph: return True if an augmenting path from s to t exists.
__a = [False] * len(_UpperCAmelCase )
__a = [s]
__a = True
while queue:
__a = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(_UpperCAmelCase )
__a = True
__a = u
return visited[t]
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
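    # Edmonds-Karp max flow on a copy of the graph; edges that end up saturated
    # (zero residual capacity but positive original capacity) are reported as the cut.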
__a = [-1] * (len(_UpperCAmelCase ))
__a = 0
__a = []
__a = [i[:] for i in graph] # Record original cut, copy.
while bfs(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = float('''Inf''' )
__a = sink
while s != source:
# Find the minimum value in select path
__a = min(_UpperCAmelCase , graph[parent[s]][s] )
__a = parent[s]
max_flow += path_flow
__a = sink
while v != source:
__a = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
__a = parent[v]
for i in range(len(_UpperCAmelCase ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
| 60 | 1 |
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class lowerCAmelCase :
def __init__( self : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Any=3 , UpperCAmelCase : List[str]=7 , UpperCAmelCase : Any=True , UpperCAmelCase : str=True , UpperCAmelCase : Dict=False , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : int=99 , UpperCAmelCase : Tuple=32 , UpperCAmelCase : int=5 , UpperCAmelCase : str=4 , UpperCAmelCase : Optional[Any]=37 , UpperCAmelCase : Dict="gelu" , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : Any=0.1 , UpperCAmelCase : List[str]=512 , UpperCAmelCase : Dict=16 , UpperCAmelCase : Optional[int]=2 , UpperCAmelCase : Optional[Any]=0.0_2 , UpperCAmelCase : Dict=3 , UpperCAmelCase : int=4 , UpperCAmelCase : List[str]=None , ) -> Dict:
lowerCamelCase__ : Optional[Any] = parent
lowerCamelCase__ : Union[str, Any] = batch_size
lowerCamelCase__ : int = seq_length
lowerCamelCase__ : Optional[Any] = is_training
lowerCamelCase__ : Any = use_input_mask
lowerCamelCase__ : Dict = use_token_type_ids
lowerCamelCase__ : Dict = use_labels
lowerCamelCase__ : Optional[Any] = vocab_size
lowerCamelCase__ : List[Any] = hidden_size
lowerCamelCase__ : List[Any] = num_hidden_layers
lowerCamelCase__ : Optional[int] = num_attention_heads
lowerCamelCase__ : Tuple = intermediate_size
lowerCamelCase__ : List[str] = hidden_act
lowerCamelCase__ : Optional[int] = hidden_dropout_prob
lowerCamelCase__ : Optional[Any] = attention_probs_dropout_prob
lowerCamelCase__ : Any = max_position_embeddings
lowerCamelCase__ : Any = type_vocab_size
lowerCamelCase__ : Any = type_sequence_label_size
lowerCamelCase__ : Tuple = initializer_range
lowerCamelCase__ : Union[str, Any] = num_labels
lowerCamelCase__ : str = num_choices
lowerCamelCase__ : int = scope
def A_ ( self : Dict ) -> Any:
lowerCamelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ : str = None
if self.use_input_mask:
lowerCamelCase__ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : Any = None
lowerCamelCase__ : Tuple = None
lowerCamelCase__ : Optional[Any] = None
lowerCamelCase__ : Optional[int] = None
if self.use_labels:
lowerCamelCase__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase__ : Tuple = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase__ : Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A_ ( self : Any ) -> Any:
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=UpperCAmelCase , )
def A_ ( self : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : List[str] , UpperCAmelCase : Union[str, Any] ) -> Tuple:
lowerCamelCase__ : Tuple = FalconModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCamelCase__ : List[str] = model(UpperCAmelCase , attention_mask=UpperCAmelCase )
lowerCamelCase__ : List[Any] = model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A_ ( self : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : Union[str, Any] , ) -> Optional[Any]:
lowerCamelCase__ : Optional[int] = True
lowerCamelCase__ : Optional[Any] = FalconModel(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCamelCase__ : str = model(
UpperCAmelCase , attention_mask=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , encoder_attention_mask=UpperCAmelCase , )
lowerCamelCase__ : Union[str, Any] = model(
UpperCAmelCase , attention_mask=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , )
lowerCamelCase__ : Dict = model(UpperCAmelCase , attention_mask=UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A_ ( self : str , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Dict , UpperCAmelCase : Tuple , UpperCAmelCase : List[str] , UpperCAmelCase : str , ) -> List[str]:
lowerCamelCase__ : Optional[Any] = FalconForCausalLM(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCamelCase__ : int = model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A_ ( self : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Tuple , UpperCAmelCase : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , ) -> List[str]:
lowerCamelCase__ : int = True
lowerCamelCase__ : Optional[int] = True
lowerCamelCase__ : str = FalconForCausalLM(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
# first forward pass
lowerCamelCase__ : List[str] = model(
UpperCAmelCase , attention_mask=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , encoder_attention_mask=UpperCAmelCase , use_cache=UpperCAmelCase , )
lowerCamelCase__ : Dict = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCamelCase__ : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCamelCase__ : List[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowerCamelCase__ : str = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCamelCase__ : Union[str, Any] = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCamelCase__ : Union[str, Any] = model(
UpperCAmelCase , attention_mask=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , encoder_attention_mask=UpperCAmelCase , output_hidden_states=UpperCAmelCase , )['hidden_states'][0]
lowerCamelCase__ : Tuple = model(
UpperCAmelCase , attention_mask=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , encoder_attention_mask=UpperCAmelCase , past_key_values=UpperCAmelCase , output_hidden_states=UpperCAmelCase , )['hidden_states'][0]
# select random slice
lowerCamelCase__ : Dict = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCamelCase__ : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCamelCase__ : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
def A_ ( self : Optional[Any] ) -> Optional[int]:
lowerCamelCase__ : Any = self.prepare_config_and_inputs()
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = config_and_inputs
lowerCamelCase__ : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, unittest.TestCase ):
UpperCAmelCase__ = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCAmelCase__ = (FalconForCausalLM,) if is_torch_available() else ()
UpperCAmelCase__ = (
{
"""feature-extraction""": FalconModel,
"""text-classification""": FalconForSequenceClassification,
"""text-generation""": FalconForCausalLM,
"""question-answering""": FalconForQuestionAnswering,
"""token-classification""": FalconForTokenClassification,
"""zero-shot""": FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def A_ ( self : List[str] ) -> List[Any]:
lowerCamelCase__ : Union[str, Any] = FalconModelTester(self )
lowerCamelCase__ : List[Any] = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def A_ ( self : Optional[int] ) -> int:
self.config_tester.run_common_tests()
def A_ ( self : Dict ) -> Dict:
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def A_ ( self : List[str] ) -> int:
lowerCamelCase__ , *lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
lowerCamelCase__ : List[Any] = alibi
self.model_tester.create_and_check_model(UpperCAmelCase , *UpperCAmelCase )
def A_ ( self : Optional[int] ) -> Optional[int]:
lowerCamelCase__ , lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : int = 3
lowerCamelCase__ : List[str] = input_dict['input_ids']
lowerCamelCase__ : Dict = input_ids.ne(1 ).to(UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCamelCase__ : int = FalconForSequenceClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCamelCase__ : List[str] = model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def A_ ( self : Optional[Any] ) -> Optional[Any]:
lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Tuple = 3
lowerCamelCase__ : List[str] = 'single_label_classification'
lowerCamelCase__ : Optional[int] = input_dict['input_ids']
lowerCamelCase__ : Union[str, Any] = input_ids.ne(1 ).to(UpperCAmelCase )
lowerCamelCase__ : List[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCamelCase__ : Union[str, Any] = FalconForSequenceClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCamelCase__ : Any = model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def A_ ( self : Any ) -> Union[str, Any]:
lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : List[str] = input_dict['input_ids']
lowerCamelCase__ : Optional[Any] = FalconForCausalLM(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCamelCase__ : Any = model(UpperCAmelCase , use_cache=UpperCAmelCase )
lowerCamelCase__ : int = input_ids.shape[0]
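        # Round-trip the KV cache through the legacy RW layout and back, then check
        # below that shapes and values survive the conversion.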
lowerCamelCase__ : str = model._convert_to_rw_cache(result.past_key_values )
lowerCamelCase__ : Optional[Any] = model._convert_cache_to_standard_format(UpperCAmelCase , UpperCAmelCase )
for layer in range(len(UpperCAmelCase ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def A_ ( self : List[str] ) -> str:
lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Optional[Any] = 3
lowerCamelCase__ : List[str] = 'multi_label_classification'
lowerCamelCase__ : Optional[int] = input_dict['input_ids']
lowerCamelCase__ : Tuple = input_ids.ne(1 ).to(UpperCAmelCase )
lowerCamelCase__ : int = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCamelCase__ : Any = FalconForSequenceClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCamelCase__ : Any = model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def A_ ( self : Any ) -> Tuple:
# Falcon can have different numbers of KV-heads than the number of query heads, so we need
# to override this test to use the right head counts.
for model_class in self.all_generative_model_classes:
lowerCamelCase__ , lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(UpperCAmelCase , 'use_cache' ):
return
lowerCamelCase__ : Dict = model_class(UpperCAmelCase ).to(UpperCAmelCase )
if "use_cache" not in inputs:
lowerCamelCase__ : Optional[Any] = True
lowerCamelCase__ : Any = model(**UpperCAmelCase )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
lowerCamelCase__ : Any = (
getattr(UpperCAmelCase , 'decoder_layers' , UpperCAmelCase )
or getattr(UpperCAmelCase , 'num_decoder_layers' , UpperCAmelCase )
or config.num_hidden_layers
)
lowerCamelCase__ : Dict = getattr(UpperCAmelCase , 'num_kv_heads' , config.num_attention_heads )
lowerCamelCase__ : Dict = getattr(UpperCAmelCase , 'd_model' , config.hidden_size )
lowerCamelCase__ : str = embed_dim // num_attention_heads
lowerCamelCase__ : List[Any] = outputs['past_key_values']
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
lowerCamelCase__ , lowerCamelCase__ : Any = inputs['input_ids'].shape
for i in range(UpperCAmelCase ):
if config.new_decoder_architecture:
lowerCamelCase__ : int = config.num_attention_heads
elif config.multi_query:
lowerCamelCase__ : Optional[Any] = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class lowerCAmelCase ( unittest.TestCase ):
@slow
def A_ ( self : int ) -> Union[str, Any]:
lowerCamelCase__ : str = AutoTokenizer.from_pretrained('Rocketknight1/falcon-rw-1b' )
lowerCamelCase__ : List[Any] = FalconForCausalLM.from_pretrained('Rocketknight1/falcon-rw-1b' )
model.eval()
model.to(UpperCAmelCase )
lowerCamelCase__ : Any = tokenizer('My favorite food is' , return_tensors='pt' ).to(UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = (
'My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.'
)
lowerCamelCase__ : Optional[Any] = model.generate(**UpperCAmelCase , do_sample=UpperCAmelCase , max_new_tokens=19 )
lowerCamelCase__ : Union[str, Any] = tokenizer.batch_decode(UpperCAmelCase )[0]
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
@slow
def A_ ( self : Any ) -> int:
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
lowerCamelCase__ : str = AutoTokenizer.from_pretrained(UpperCAmelCase )
lowerCamelCase__ : int = FalconForCausalLM.from_pretrained(UpperCAmelCase )
model.eval()
model.to(UpperCAmelCase )
lowerCamelCase__ : Tuple = tokenizer('My favorite food is' , return_tensors='pt' ).to(UpperCAmelCase )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**UpperCAmelCase , do_sample=UpperCAmelCase , max_new_tokens=4 )
model.generate(**UpperCAmelCase , do_sample=UpperCAmelCase , max_new_tokens=4 )
model.generate(**UpperCAmelCase , num_beams=2 , max_new_tokens=4 )
@slow
def A_ ( self : Union[str, Any] ) -> str:
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
lowerCamelCase__ : Dict = AutoTokenizer.from_pretrained(UpperCAmelCase )
lowerCamelCase__ : Tuple = FalconForCausalLM.from_pretrained(UpperCAmelCase )
model.eval()
model.to(device=UpperCAmelCase )
lowerCamelCase__ : Any = tokenizer('My favorite food is' , return_tensors='pt' ).to(UpperCAmelCase )
# Test results are the same with and without cache
lowerCamelCase__ : List[str] = model.generate(**UpperCAmelCase , do_sample=UpperCAmelCase , max_new_tokens=20 , use_cache=UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = model.generate(**UpperCAmelCase , do_sample=UpperCAmelCase , max_new_tokens=20 , use_cache=UpperCAmelCase )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
| 295 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
def A_ ( self : str ) -> Dict:
lowerCamelCase__ : Optional[Any] = tempfile.mkdtemp()
# fmt: off
lowerCamelCase__ : Tuple = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest']
# fmt: on
lowerCamelCase__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
lowerCamelCase__ : Any = {
'do_resize': True,
'size': {'height': 18, 'width': 18},
'do_normalize': True,
'image_mean': [0.5, 0.5, 0.5],
'image_std': [0.5, 0.5, 0.5],
}
lowerCamelCase__ : int = os.path.join(self.tmpdirname , UpperCAmelCase )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(UpperCAmelCase , UpperCAmelCase )
def A_ ( self : List[str] , **UpperCAmelCase : str ) -> int:
return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def A_ ( self : Optional[int] , **UpperCAmelCase : List[str] ) -> List[str]:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def A_ ( self : int ) -> Any:
shutil.rmtree(self.tmpdirname )
def A_ ( self : str ) -> str:
lowerCamelCase__ : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCamelCase__ : Union[str, Any] = [Image.fromarray(np.moveaxis(UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def A_ ( self : Dict ) -> Dict:
lowerCamelCase__ : List[Any] = self.get_tokenizer()
lowerCamelCase__ : Tuple = self.get_image_processor()
lowerCamelCase__ : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase__ : List[str] = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase )
def A_ ( self : List[str] ) -> Tuple:
lowerCamelCase__ : List[Any] = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase__ : Tuple = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
lowerCamelCase__ : int = self.get_image_processor(do_normalize=UpperCAmelCase , padding_value=1.0 )
lowerCamelCase__ : Union[str, Any] = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase )
def A_ ( self : Any ) -> Tuple:
lowerCamelCase__ : List[Any] = self.get_image_processor()
lowerCamelCase__ : Any = self.get_tokenizer()
lowerCamelCase__ : Optional[int] = VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase__ : Optional[int] = self.prepare_image_inputs()
lowerCamelCase__ : int = image_processor(UpperCAmelCase , return_tensors='np' )
lowerCamelCase__ : List[str] = processor(images=UpperCAmelCase , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def A_ ( self : Tuple ) -> List[Any]:
lowerCamelCase__ : Dict = self.get_image_processor()
lowerCamelCase__ : List[str] = self.get_tokenizer()
lowerCamelCase__ : str = VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase__ : Tuple = 'lower newer'
lowerCamelCase__ : Tuple = processor(text=UpperCAmelCase )
lowerCamelCase__ : Any = tokenizer(UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def A_ ( self : Optional[int] ) -> Any:
lowerCamelCase__ : Optional[int] = self.get_image_processor()
lowerCamelCase__ : Tuple = self.get_tokenizer()
lowerCamelCase__ : Tuple = VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase__ : str = 'lower newer'
lowerCamelCase__ : Dict = self.prepare_image_inputs()
lowerCamelCase__ : int = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with self.assertRaises(UpperCAmelCase ):
processor()
def A_ ( self : Optional[Any] ) -> List[str]:
lowerCamelCase__ : Dict = self.get_image_processor()
lowerCamelCase__ : Union[str, Any] = self.get_tokenizer()
lowerCamelCase__ : List[str] = VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase__ : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase__ : Dict = processor.batch_decode(UpperCAmelCase )
lowerCamelCase__ : List[str] = tokenizer.batch_decode(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def A_ ( self : Optional[int] ) -> List[str]:
lowerCamelCase__ : Tuple = self.get_image_processor()
lowerCamelCase__ : List[str] = self.get_tokenizer()
lowerCamelCase__ : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase__ : Dict = 'lower newer'
lowerCamelCase__ : List[Any] = self.prepare_image_inputs()
lowerCamelCase__ : Union[str, Any] = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
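# Hedged sketch of what the processor's __call__ is expected to do in the tests
# above: run text through the tokenizer, images through the image processor, and
# merge the two dicts so one batch carries input_ids/attention_mask plus
# pixel_values. merge_modalities is an illustrative helper, not a transformers API.
def merge_modalities(text_encoding: dict, image_encoding: dict) -> dict:
    if not text_encoding and not image_encoding:
        raise ValueError("You have to specify either text or images.")
    merged = dict(text_encoding)
    merged.update(image_encoding)  # adds pixel_values alongside the text keys
    return merged

# Mimics the key set asserted in the test above.
batch = merge_modalities(
    {"input_ids": [[101, 102]], "token_type_ids": [[0, 0]], "attention_mask": [[1, 1]]},
    {"pixel_values": [[[0.5]]]},
)
assert sorted(batch) == ["attention_mask", "input_ids", "pixel_values", "token_type_ids"]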
| 295 | 1 |
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def _a (__SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
return field(default_factory=lambda: default , metadata=__SCREAMING_SNAKE_CASE )
@dataclass
class UpperCAmelCase :
"""simple docstring"""
lowerCAmelCase_ = field(
metadata={"""help""": """The csv file to plot."""} , )
lowerCAmelCase_ = field(
default=lowercase_ , metadata={"""help""": """Whether to plot along batch size or sequence length. Defaults to sequence length."""} , )
lowerCAmelCase_ = field(
default=lowercase_ , metadata={"""help""": """Whether the csv file has time results or memory results. Defaults to memory results."""} , )
lowerCAmelCase_ = field(
default=lowercase_ , metadata={"""help""": """Disable logarithmic scale when plotting"""} , )
lowerCAmelCase_ = field(
default=lowercase_ , metadata={
"""help""": """Whether the csv file has training results or inference results. Defaults to inference results."""
} , )
lowerCAmelCase_ = field(
default=lowercase_ , metadata={"""help""": """Filename under which the plot will be saved. If unused no plot is saved."""} , )
lowerCAmelCase_ = list_field(
default=lowercase_ , metadata={"""help""": """List of model names that are used instead of the ones in the csv file."""})
def _a (__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
try:
int(__SCREAMING_SNAKE_CASE )
return True
except ValueError:
return False
def _a (__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
try:
float(__SCREAMING_SNAKE_CASE )
return True
except ValueError:
return False
class UpperCAmelCase :
"""simple docstring"""
def __init__( self : Optional[Any] , UpperCamelCase__ : Optional[int] ) -> str:
_UpperCamelCase =args
_UpperCamelCase =defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file , newline='''''' ) as csv_file:
_UpperCamelCase =csv.DictReader(UpperCamelCase__ )
for row in reader:
_UpperCamelCase =row['''model''']
self.result_dict[model_name]["bsz"].append(int(row['''batch_size'''] ) )
self.result_dict[model_name]["seq_len"].append(int(row['''sequence_length'''] ) )
if can_convert_to_int(row['''result'''] ):
# value is not None
_UpperCamelCase =int(row['''result'''] )
elif can_convert_to_float(row['''result'''] ):
# value is not None
_UpperCamelCase =float(row['''result'''] )
def UpperCamelCase__ ( self : str ) -> int:
_UpperCamelCase , _UpperCamelCase =plt.subplots()
_UpperCamelCase ='''Time usage''' if self.args.is_time else '''Memory usage'''
_UpperCamelCase =title_str + ''' for training''' if self.args.is_train else title_str + ''' for inference'''
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale('''log''' )
ax.set_yscale('''log''' )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
_UpperCamelCase =sorted(set(self.result_dict[model_name]['''bsz'''] ) )
_UpperCamelCase =sorted(set(self.result_dict[model_name]['''seq_len'''] ) )
_UpperCamelCase =self.result_dict[model_name]['''result''']
((_UpperCamelCase) , (_UpperCamelCase)) =(
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
_UpperCamelCase =(
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
_UpperCamelCase =np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=UpperCamelCase__ , )
else:
_UpperCamelCase =np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
((_UpperCamelCase) , (_UpperCamelCase)) =(
('''batch_size''', '''len''') if self.args.plot_along_batch else ('''in #tokens''', '''bsz''')
)
_UpperCamelCase =np.asarray(UpperCamelCase__ , UpperCamelCase__ )[: len(UpperCamelCase__ )]
plt.scatter(
UpperCamelCase__ , UpperCamelCase__ , label=F'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' )
plt.plot(UpperCamelCase__ , UpperCamelCase__ , '''--''' )
title_str += F''' {label_model_name} vs.'''
_UpperCamelCase =title_str[:-4]
_UpperCamelCase ='''Time in s''' if self.args.is_time else '''Memory in MB'''
# plot
plt.title(UpperCamelCase__ )
plt.xlabel(UpperCamelCase__ )
plt.ylabel(UpperCamelCase__ )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def _a ():
"""simple docstring"""
_UpperCamelCase =HfArgumentParser(__SCREAMING_SNAKE_CASE )
_UpperCamelCase =parser.parse_args_into_dataclasses()[0]
_UpperCamelCase =Plot(args=__SCREAMING_SNAKE_CASE )
plot.plot()
if __name__ == "__main__":
main()
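# A small sketch of the CSV shape the Plot class above consumes: one row per
# (model, batch_size, sequence_length) combination with a numeric "result" column,
# matching the column names read in the __init__ loop. Values are made up.
import csv, io

sample_csv = io.StringIO(
    "model,batch_size,sequence_length,result\n"
    "bert-base,8,128,0.031\n"
    "bert-base,8,512,0.118\n"
)
rows = list(csv.DictReader(sample_csv))
assert rows[0]["model"] == "bert-base"
assert float(rows[1]["result"]) == 0.118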
| 707 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def _a (__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if len(__SCREAMING_SNAKE_CASE ) != 32:
raise ValueError('''Input must be of length 32''' )
_UpperCamelCase =b''''''
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def _a (__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if i < 0:
raise ValueError('''Input must be non-negative''' )
_UpperCamelCase =format(__SCREAMING_SNAKE_CASE , '''08x''' )[-8:]
_UpperCamelCase =b''''''
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('''utf-8''' )
return little_endian_hex
def _a (__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_UpperCamelCase =b''''''
for char in message:
bit_string += format(__SCREAMING_SNAKE_CASE , '''08b''' ).encode('''utf-8''' )
_UpperCamelCase =format(len(__SCREAMING_SNAKE_CASE ) , '''064b''' ).encode('''utf-8''' )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(__SCREAMING_SNAKE_CASE ) % 512 != 448:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def _a (__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if len(__SCREAMING_SNAKE_CASE ) % 512 != 0:
raise ValueError('''Input must have length that\'s a multiple of 512''' )
for pos in range(0 , len(__SCREAMING_SNAKE_CASE ) , 512 ):
_UpperCamelCase =bit_string[pos : pos + 512]
_UpperCamelCase =[]
for i in range(0 , 512 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def _a (__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if i < 0:
raise ValueError('''Input must be non-negative''' )
_UpperCamelCase =format(__SCREAMING_SNAKE_CASE , '''032b''' )
_UpperCamelCase =''''''
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(__SCREAMING_SNAKE_CASE , 2 )
def _a (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return (a + b) % 2**32
def _a (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if i < 0:
raise ValueError('''Input must be non-negative''' )
if shift < 0:
raise ValueError('''Shift must be non-negative''' )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def _a (__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_UpperCamelCase =preprocess(__SCREAMING_SNAKE_CASE )
_UpperCamelCase =[int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
_UpperCamelCase =0X6745_2301
_UpperCamelCase =0Xefcd_ab89
_UpperCamelCase =0X98ba_dcfe
_UpperCamelCase =0X1032_5476
_UpperCamelCase =[
7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, # round 1
5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, # round 2
4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, # round 3
6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, # round 4
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(__SCREAMING_SNAKE_CASE ):
_UpperCamelCase =aa
_UpperCamelCase =ba
_UpperCamelCase =ca
_UpperCamelCase =da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
_UpperCamelCase =d ^ (b & (c ^ d))
_UpperCamelCase =i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
_UpperCamelCase =c ^ (d & (b ^ c))
_UpperCamelCase =(5 * i + 1) % 16
elif i <= 47:
_UpperCamelCase =b ^ c ^ d
_UpperCamelCase =(3 * i + 5) % 16
else:
_UpperCamelCase =c ^ (b | not_aa(__SCREAMING_SNAKE_CASE ))
_UpperCamelCase =(7 * i) % 16
_UpperCamelCase =(f + a + added_consts[i] + block_words[g]) % 2**32
_UpperCamelCase =d
_UpperCamelCase =c
_UpperCamelCase =b
_UpperCamelCase =sum_aa(__SCREAMING_SNAKE_CASE , left_rotate_aa(__SCREAMING_SNAKE_CASE , shift_amounts[i] ) )
# Add hashed chunk to running total
_UpperCamelCase =sum_aa(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
_UpperCamelCase =sum_aa(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
_UpperCamelCase =sum_aa(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
_UpperCamelCase =sum_aa(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
_UpperCamelCase =reformat_hex(__SCREAMING_SNAKE_CASE ) + reformat_hex(__SCREAMING_SNAKE_CASE ) + reformat_hex(__SCREAMING_SNAKE_CASE ) + reformat_hex(__SCREAMING_SNAKE_CASE )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
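# Sanity-check sketch: any MD5 implementation can be validated against the standard
# library. hashlib.md5 is the reference; the values below are the well-known RFC 1321
# test vectors for the empty message and for b"abc".
import hashlib

assert hashlib.md5(b"").hexdigest() == "d41d8cd98f00b204e9800998ecf8427e"
assert hashlib.md5(b"abc").hexdigest() == "900150983cd24fb0d6963f7d28e17f72"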
| 271 | 0 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
SCREAMING_SNAKE_CASE = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 99 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class UpperCamelCase_ ( a_ ):
_A : Optional[int] = 'facebook/bart-large-mnli'
_A : Union[str, Any] = (
'This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '
'should be the text to classify, and `labels`, which should be the list of labels to use for classification. '
'It returns the most likely label in the list of provided `labels` for the input text.'
)
_A : Dict = 'text_classifier'
_A : Union[str, Any] = AutoTokenizer
_A : Tuple = AutoModelForSequenceClassification
_A : Optional[int] = ['text', ['text']]
_A : Dict = ['text']
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
super().setup()
UpperCAmelCase = self.model.config
UpperCAmelCase = -1
for idx, label in config.idalabel.items():
if label.lower().startswith("""entail""" ):
UpperCAmelCase = int(snake_case__ )
if self.entailment_id == -1:
raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""" )
def UpperCamelCase_ ( self , snake_case__ , snake_case__ ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = labels
return self.pre_processor(
[text] * len(snake_case__ ) , [f'''This example is {label}''' for label in labels] , return_tensors="""pt""" , padding="""max_length""" , )
def UpperCamelCase_ ( self , snake_case__ ) -> str:
"""simple docstring"""
UpperCAmelCase = outputs.logits
UpperCAmelCase = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
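# Hedged sketch of the zero-shot recipe this tool relies on: each candidate label is
# turned into an NLI hypothesis ("This example is {label}") paired with the input
# text, and the label whose pair scores highest on the entailment class wins.
# The scores below are invented placeholders standing in for model logits.
def pick_label(entailment_scores: dict) -> str:
    # entailment_scores maps label -> entailment logit for (text, hypothesis) pairs
    return max(entailment_scores, key=entailment_scores.get)

assert pick_label({"positive": 2.3, "negative": -1.1, "neutral": 0.4}) == "positive"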
| 673 | 0 |
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
__a : Any = logging.get_logger(__name__)
class __lowercase ( lowercase_ ):
'''simple docstring'''
def __init__( self : str , *UpperCamelCase_ : str , **UpperCamelCase_ : Union[str, Any] ):
"""simple docstring"""
warnings.warn(
"""The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use VideoMAEImageProcessor instead.""" , UpperCamelCase_ , )
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
| 199 |
from collections.abc import Sequence
def _SCREAMING_SNAKE_CASE ( __lowercase : Sequence[float] , __lowercase : float ) -> float:
"""simple docstring"""
return sum(c * (x**i) for i, c in enumerate(__lowercase ) )
def _SCREAMING_SNAKE_CASE ( __lowercase : Sequence[float] , __lowercase : float ) -> float:
"""simple docstring"""
__A = 0.0
for coeff in reversed(__lowercase ):
__A = result * x + coeff
return result
if __name__ == "__main__":
__a : Dict = (0.0, 0.0, 5.0, 9.3, 7.0)
__a : Optional[Any] = 1_0.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
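# Self-contained sketch contrasting the two strategies above: Horner's rule rewrites
# c0 + c1*x + c2*x**2 + ... as (..(cn*x + c_{n-1})*x + ..)*x + c0, so a degree-n
# polynomial costs n multiplications instead of one exponentiation per term.
from math import isclose

def horner_eval(coeffs, x):
    result = 0.0
    for c in reversed(coeffs):
        result = result * x + c
    return result

check_coeffs = (0.0, 0.0, 5.0, 9.3, 7.0)
for point in (-2.0, 0.0, 1.5, 10.0):
    naive = sum(c * point**i for i, c in enumerate(check_coeffs))
    assert isclose(horner_eval(check_coeffs, point), naive)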
| 199 | 1 |
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
lowerCAmelCase = logging.get_logger(__name__)
class _a ( UpperCamelCase__ ):
_lowercase : Any = ['''input_features''', '''attention_mask''']
def __init__( self: Dict , UpperCamelCase_: str=80 , UpperCamelCase_: Any=16_000 , UpperCamelCase_: Union[str, Any]=80 , UpperCamelCase_: List[str]=0.0 , UpperCamelCase_: int=True , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Optional[Any]=True , **UpperCamelCase_: List[str] , ) -> int:
"""simple docstring"""
super().__init__(feature_size=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , padding_value=UpperCamelCase_ , **UpperCamelCase_ )
lowercase__ = num_mel_bins
lowercase__ = do_ceptral_normalize
lowercase__ = normalize_means
lowercase__ = normalize_vars
lowercase__ = True
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: np.ndarray , ) -> np.ndarray:
"""simple docstring"""
lowercase__ = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
lowercase__ = torch.from_numpy(UpperCamelCase_ ).unsqueeze(0 )
lowercase__ = ta_kaldi.fbank(UpperCamelCase_ , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def lowerCamelCase_ ( UpperCamelCase_: np.ndarray , UpperCamelCase_: int , UpperCamelCase_: Optional[bool] = True , UpperCamelCase_: Optional[bool] = True , UpperCamelCase_: float = 0.0 , ) -> np.ndarray:
"""simple docstring"""
if normalize_means:
lowercase__ = x[:input_length].mean(axis=0 )
lowercase__ = np.subtract(UpperCamelCase_ , UpperCamelCase_ )
if normalize_vars:
lowercase__ = x[:input_length].std(axis=0 )
lowercase__ = np.divide(UpperCamelCase_ , UpperCamelCase_ )
if input_length < x.shape[0]:
lowercase__ = padding_value
# make sure array is in float32
lowercase__ = x.astype(np.floataa )
return x
def lowerCamelCase_ ( self: int , UpperCamelCase_: List[np.ndarray] , UpperCamelCase_: Optional[np.ndarray] = None ) -> List[np.ndarray]:
"""simple docstring"""
lowercase__ = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(UpperCamelCase_ , UpperCamelCase_ , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(UpperCamelCase_ , UpperCamelCase_ )
]
def __call__( self: List[Any] , UpperCamelCase_: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase_: Union[bool, str, PaddingStrategy] = False , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: bool = False , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Optional[Union[str, TensorType]] = None , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Optional[bool] = None , **UpperCamelCase_: List[Any] , ) -> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
f' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'
f' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
lowercase__ = isinstance(UpperCamelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
lowercase__ = is_batched_numpy or (
isinstance(UpperCamelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase__ = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(UpperCamelCase_ , np.ndarray ):
lowercase__ = np.asarray(UpperCamelCase_ , dtype=np.floataa )
elif isinstance(UpperCamelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase__ = [raw_speech]
# extract fbank features
lowercase__ = [self._extract_fbank_features(UpperCamelCase_ ) for waveform in raw_speech]
# convert into correct format for padding
lowercase__ = BatchFeature({'''input_features''': features} )
lowercase__ = self.pad(
UpperCamelCase_ , padding=UpperCamelCase_ , max_length=UpperCamelCase_ , truncation=UpperCamelCase_ , pad_to_multiple_of=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , )
# make sure list is in array format
lowercase__ = padded_inputs.get('''input_features''' )
if isinstance(input_features[0] , UpperCamelCase_ ):
lowercase__ = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for feature in input_features]
lowercase__ = padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
lowercase__ = [np.asarray(UpperCamelCase_ , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
lowercase__ = (
np.array(UpperCamelCase_ , dtype=np.intaa )
if self._get_padding_strategies(UpperCamelCase_ , max_length=UpperCamelCase_ ) is not PaddingStrategy.DO_NOT_PAD
else None
)
lowercase__ = self.normalize(
padded_inputs['''input_features'''] , attention_mask=UpperCamelCase_ )
if return_tensors is not None:
lowercase__ = padded_inputs.convert_to_tensors(UpperCamelCase_ )
return padded_inputs
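# Minimal numpy sketch of the utterance-level CMVN applied above: per feature
# dimension, subtract the mean and divide by the standard deviation computed over
# the valid (unpadded) frames only. The values here are arbitrary illustration data.
import numpy as np

frames = np.array([[1.0, 10.0], [3.0, 30.0], [0.0, 0.0]], dtype=np.float32)  # last row is padding
valid = 2  # number of real frames, as given by the attention mask
mean = frames[:valid].mean(axis=0)
std = frames[:valid].std(axis=0)
normalized = (frames[:valid] - mean) / std
assert np.allclose(normalized.mean(axis=0), 0.0, atol=1e-6)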
| 43 |
import random
from typing import Any
def UpperCamelCase( __UpperCamelCase : list ):
for _ in range(len(__UpperCamelCase ) ):
lowerCAmelCase_ : Union[str, Any] = random.randint(0 ,len(__UpperCamelCase ) - 1 )
lowerCAmelCase_ : List[Any] = random.randint(0 ,len(__UpperCamelCase ) - 1 )
lowerCAmelCase_ , lowerCAmelCase_ : Dict = data[b], data[a]
return data
if __name__ == "__main__":
A__ : List[Any] = [0, 1, 2, 3, 4, 5, 6, 7]
A__ : int = ['''python''', '''says''', '''hello''', '''!''']
print('''Fisher-Yates Shuffle:''')
print('''List''', integers, strings)
print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
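# Note: the shuffle above performs len(data) random transpositions, which is a
# simplification; the textbook Fisher-Yates walks the list once and swaps each
# position with a uniformly chosen earlier-or-equal index, giving every permutation
# exactly equal probability. A sketch of that classic form:
def fisher_yates_classic(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data

print('Classic FY Shuffle', fisher_yates_classic(list(range(8))))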
| 171 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCamelCase__ ( __lowerCAmelCase ):
def __a ( self : Tuple ):
'''simple docstring'''
a__ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCamelCase , "width_multiplier" ) )
class UpperCamelCase__ :
def __init__( self : str , lowerCamelCase : int , lowerCamelCase : Optional[Any]=1_3 , lowerCamelCase : Any=6_4 , lowerCamelCase : Optional[int]=2 , lowerCamelCase : Union[str, Any]=3 , lowerCamelCase : List[str]="swish" , lowerCamelCase : str=3 , lowerCamelCase : Dict=3_2 , lowerCamelCase : List[str]=0.1 , lowerCamelCase : Optional[Any]=0.02 , lowerCamelCase : str=True , lowerCamelCase : Union[str, Any]=True , lowerCamelCase : Optional[int]=1_0 , lowerCamelCase : Any=None , lowerCamelCase : Tuple=0.25 , lowerCamelCase : Optional[Any]=0.0 , lowerCamelCase : Any=0.0 , ):
'''simple docstring'''
a__ = parent
a__ = batch_size
a__ = image_size
a__ = patch_size
a__ = num_channels
a__ = make_divisible(5_1_2 * width_multiplier , divisor=8 )
a__ = hidden_act
a__ = conv_kernel_size
a__ = output_stride
a__ = classifier_dropout_prob
a__ = use_labels
a__ = is_training
a__ = num_labels
a__ = initializer_range
a__ = scope
a__ = width_multiplier
a__ = ffn_dropout
a__ = attn_dropout
def __a ( self : Dict ):
'''simple docstring'''
a__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a__ = None
a__ = None
if self.use_labels:
a__ = ids_tensor([self.batch_size] , self.num_labels )
a__ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
a__ = self.get_config()
return config, pixel_values, labels, pixel_labels
def __a ( self : Any ):
'''simple docstring'''
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def __a ( self : List[str] , lowerCamelCase : str , lowerCamelCase : Tuple , lowerCamelCase : Dict , lowerCamelCase : str ):
'''simple docstring'''
a__ = MobileViTVaModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
a__ = model(lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __a ( self : Any , lowerCamelCase : Any , lowerCamelCase : Any , lowerCamelCase : Union[str, Any] , lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
a__ = self.num_labels
a__ = MobileViTVaForImageClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
a__ = model(lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self : Dict , lowerCamelCase : int , lowerCamelCase : Tuple , lowerCamelCase : List[str] , lowerCamelCase : Any ):
'''simple docstring'''
a__ = self.num_labels
a__ = MobileViTVaForSemanticSegmentation(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
a__ = model(lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
a__ = model(lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __a ( self : Optional[Any] ):
'''simple docstring'''
a__ = self.prepare_config_and_inputs()
a__ , a__ , a__ , a__ = config_and_inputs
a__ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( __lowerCAmelCase ,__lowerCAmelCase ,unittest.TestCase ):
lowerCAmelCase__ : Optional[int] = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
lowerCAmelCase__ : Union[str, Any] = (
{
"feature-extraction": MobileViTVaModel,
"image-classification": MobileViTVaForImageClassification,
"image-segmentation": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ : Optional[int] = False
lowerCAmelCase__ : Union[str, Any] = False
lowerCAmelCase__ : str = False
lowerCAmelCase__ : Any = False
def __a ( self : Dict ):
'''simple docstring'''
a__ = MobileViTVaModelTester(self )
a__ = MobileViTVaConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase )
def __a ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileViTV2 does not use inputs_embeds" )
def __a ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip(reason="MobileViTV2 does not support input and output embeddings" )
def __a ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip(reason="MobileViTV2 does not output attentions" )
def __a ( self : List[Any] ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run." )
def __a ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __a ( self : Optional[Any] ):
'''simple docstring'''
pass
def __a ( self : Union[str, Any] ):
'''simple docstring'''
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ = model_class(lowerCamelCase )
a__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ = [*signature.parameters.keys()]
a__ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase )
def __a ( self : Union[str, Any] ):
'''simple docstring'''
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def __a ( self : List[str] ):
'''simple docstring'''
def check_hidden_states_output(lowerCamelCase : Optional[int] , lowerCamelCase : Optional[Any] , lowerCamelCase : str ):
a__ = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
a__ = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
a__ = outputs.hidden_states
a__ = 5
self.assertEqual(len(lowerCamelCase ) , lowerCamelCase )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
a__ = 2
for i in range(len(lowerCamelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ = True
check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a__ = True
check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def __a ( self : Dict ):
'''simple docstring'''
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
def __a ( self : int ):
'''simple docstring'''
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase )
@slow
def __a ( self : List[str] ):
'''simple docstring'''
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ = MobileViTVaModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def _lowerCamelCase () -> Optional[int]:
a__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
@cached_property
def __a ( self : Optional[int] ):
'''simple docstring'''
return (
MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" )
if is_vision_available()
else None
)
@slow
def __a ( self : List[Any] ):
'''simple docstring'''
a__ = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" ).to(
lowerCamelCase )
a__ = self.default_image_processor
a__ = prepare_img()
a__ = image_processor(images=lowerCamelCase , return_tensors="pt" ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
a__ = model(**lowerCamelCase )
# verify the logits
a__ = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , lowerCamelCase )
a__ = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1e-4 ) )
@slow
def __a ( self : Union[str, Any] ):
'''simple docstring'''
a__ = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
a__ = model.to(lowerCamelCase )
a__ = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
a__ = prepare_img()
a__ = image_processor(images=lowerCamelCase , return_tensors="pt" ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
a__ = model(**lowerCamelCase )
a__ = outputs.logits
# verify the logits
a__ = torch.Size((1, 2_1, 3_2, 3_2) )
self.assertEqual(logits.shape , lowerCamelCase )
a__ = torch.tensor(
[
[[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
[[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
[[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
] , device=lowerCamelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase , atol=1e-4 ) )
@slow
def __a ( self : int ):
'''simple docstring'''
a__ = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
a__ = model.to(lowerCamelCase )
a__ = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
a__ = prepare_img()
a__ = image_processor(images=lowerCamelCase , return_tensors="pt" ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
a__ = model(**lowerCamelCase )
a__ = outputs.logits.detach().cpu()
a__ = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase , target_sizes=[(5_0, 6_0)] )
a__ = torch.Size((5_0, 6_0) )
self.assertEqual(segmentation[0].shape , lowerCamelCase )
a__ = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase )
a__ = torch.Size((3_2, 3_2) )
self.assertEqual(segmentation[0].shape , lowerCamelCase )
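# Hedged sketch of the make_divisible helper imported at the top of this file,
# following the rounding rule from the MobileNet reference code: round a channel
# count to the nearest multiple of `divisor`, never dropping more than 10% below
# the requested value. make_divisible_sketch is illustrative, not the real import.
def make_divisible_sketch(value, divisor=8, min_value=None):
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    if new_value < 0.9 * value:  # avoid shrinking the width by more than 10%
        new_value += divisor
    return new_value

assert make_divisible_sketch(512 * 0.25) == 128  # width_multiplier=0.25 -> 128 channels
assert make_divisible_sketch(100) == 104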
| 721 |
'''simple docstring'''
from __future__ import annotations
from math import pi
def _lowerCamelCase (__lowerCamelCase : float , __lowerCamelCase : float , __lowerCamelCase : float ) -> dict[str, float]:
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if inductance < 0:
raise ValueError("Inductance cannot be negative" )
if frequency < 0:
raise ValueError("Frequency cannot be negative" )
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
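# Worked example of the relation above, X_L = 2 * pi * f * L: a 0.1 H inductor
# driven at 60 Hz presents roughly 37.7 ohms of inductive reactance.
from math import isclose, pi

assert isclose(2 * pi * 60 * 0.1, 37.699, rel_tol=1e-4)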
| 289 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
UpperCamelCase = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 520 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class _a ( lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : Tuple = DistilBertTokenizer
lowerCamelCase_ : Any = DistilBertTokenizerFast
lowerCamelCase_ : Union[str, Any] = True
@slow
def __UpperCAmelCase( self ):
__A : Any = DistilBertTokenizer.from_pretrained("distilbert-base-uncased" )
__A : Optional[Any] = tokenizer.encode("sequence builders" , add_special_tokens=__UpperCAmelCase )
__A : Dict = tokenizer.encode("multi-sequence build" , add_special_tokens=__UpperCAmelCase )
__A : Any = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase )
__A : str = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase , __UpperCAmelCase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
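# Sketch of the special-token layout the assertions above encode: a single sequence
# becomes [CLS] tokens [SEP], and a pair becomes [CLS] tokens_a [SEP] tokens_b [SEP].
# The ids below (101 for [CLS], 102 for [SEP]) follow the BERT-family convention.
def with_special_tokens(tokens_a, tokens_b=None, cls_id=101, sep_id=102):
    ids = [cls_id] + tokens_a + [sep_id]
    if tokens_b is not None:
        ids += tokens_b + [sep_id]
    return ids

assert with_special_tokens([7, 8]) == [101, 7, 8, 102]
assert with_special_tokens([7, 8], [9]) == [101, 7, 8, 102, 9, 102]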
| 520 | 1 |
'''simple docstring'''
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def __UpperCAmelCase ( UpperCamelCase__ :List[Any] , UpperCamelCase__ :List[str] , UpperCamelCase__ :str , UpperCamelCase__ :Any="attention" ) -> List[Any]:
snake_case__ : List[Any] = params[F'''{prefix}/layers_{i}/{layer_name}/key/kernel''']
snake_case__ : Union[str, Any] = params[F'''{prefix}/layers_{i}/{layer_name}/out/kernel''']
snake_case__ : List[str] = params[F'''{prefix}/layers_{i}/{layer_name}/query/kernel''']
snake_case__ : str = params[F'''{prefix}/layers_{i}/{layer_name}/value/kernel''']
return k, o, q, v
def __UpperCAmelCase ( UpperCamelCase__ :Any , UpperCamelCase__ :int , UpperCamelCase__ :int , UpperCamelCase__ :Any=False ) -> Optional[int]:
if split_mlp_wi:
snake_case__ : Any = params[F'''{prefix}/layers_{i}/mlp/wi_0/kernel''']
snake_case__ : Union[str, Any] = params[F'''{prefix}/layers_{i}/mlp/wi_1/kernel''']
snake_case__ : Any = (wi_a, wi_a)
else:
snake_case__ : List[str] = params[F'''{prefix}/layers_{i}/mlp/wi/kernel''']
snake_case__ : Optional[int] = params[F'''{prefix}/layers_{i}/mlp/wo/kernel''']
return wi, wo
def __UpperCAmelCase ( UpperCamelCase__ :Dict , UpperCamelCase__ :int , UpperCamelCase__ :Optional[int] , UpperCamelCase__ :Union[str, Any] ) -> str:
return params[F'''{prefix}/layers_{i}/{layer_name}/scale''']
def __UpperCAmelCase ( UpperCamelCase__ :dict , *, UpperCamelCase__ :int , UpperCamelCase__ :bool ) -> Union[str, Any]:
snake_case__ : Tuple = traverse_util.flatten_dict(variables['''target'''] )
snake_case__ : str = {'''/'''.join(UpperCamelCase__ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
snake_case__ : Optional[Any] = '''encoder/layers_0/mlp/wi_0/kernel''' in old
print('''Split MLP:''' , UpperCamelCase__ )
snake_case__ : List[str] = collections.OrderedDict()
# Shared embeddings.
snake_case__ : Union[str, Any] = old['''token_embedder/embedding''']
# Encoder.
for i in range(UpperCamelCase__ ):
# Block i, layer 0 (Self Attention).
snake_case__ : Dict = tax_layer_norm_lookup(UpperCamelCase__ , UpperCamelCase__ , '''encoder''' , '''pre_attention_layer_norm''' )
snake_case__ , snake_case__ , snake_case__ , snake_case__ : Any = tax_attention_lookup(UpperCamelCase__ , UpperCamelCase__ , '''encoder''' , '''attention''' )
snake_case__ : Optional[int] = layer_norm
snake_case__ : Tuple = k.T
snake_case__ : Any = o.T
snake_case__ : Dict = q.T
snake_case__ : int = v.T
# Block i, layer 1 (MLP).
snake_case__ : Any = tax_layer_norm_lookup(UpperCamelCase__ , UpperCamelCase__ , '''encoder''' , '''pre_mlp_layer_norm''' )
snake_case__ , snake_case__ : str = tax_mlp_lookup(UpperCamelCase__ , UpperCamelCase__ , '''encoder''' , UpperCamelCase__ )
snake_case__ : str = layer_norm
if split_mlp_wi:
snake_case__ : List[str] = wi[0].T
snake_case__ : str = wi[1].T
else:
snake_case__ : int = wi.T
snake_case__ : Any = wo.T
snake_case__ : Tuple = old[
'''encoder/relpos_bias/rel_embedding'''
].T
snake_case__ : List[str] = old['''encoder/encoder_norm/scale''']
if not is_encoder_only:
# Decoder.
for i in range(UpperCamelCase__ ):
# Block i, layer 0 (Self Attention).
snake_case__ : Tuple = tax_layer_norm_lookup(UpperCamelCase__ , UpperCamelCase__ , '''decoder''' , '''pre_self_attention_layer_norm''' )
snake_case__ , snake_case__ , snake_case__ , snake_case__ : List[str] = tax_attention_lookup(UpperCamelCase__ , UpperCamelCase__ , '''decoder''' , '''self_attention''' )
snake_case__ : str = layer_norm
snake_case__ : Optional[int] = k.T
snake_case__ : Any = o.T
snake_case__ : Any = q.T
snake_case__ : List[Any] = v.T
# Block i, layer 1 (Cross Attention).
snake_case__ : str = tax_layer_norm_lookup(UpperCamelCase__ , UpperCamelCase__ , '''decoder''' , '''pre_cross_attention_layer_norm''' )
snake_case__ , snake_case__ , snake_case__ , snake_case__ : int = tax_attention_lookup(UpperCamelCase__ , UpperCamelCase__ , '''decoder''' , '''encoder_decoder_attention''' )
snake_case__ : int = layer_norm
snake_case__ : Optional[Any] = k.T
snake_case__ : List[Any] = o.T
snake_case__ : Dict = q.T
snake_case__ : str = v.T
# Block i, layer 2 (MLP).
snake_case__ : Union[str, Any] = tax_layer_norm_lookup(UpperCamelCase__ , UpperCamelCase__ , '''decoder''' , '''pre_mlp_layer_norm''' )
snake_case__ , snake_case__ : str = tax_mlp_lookup(UpperCamelCase__ , UpperCamelCase__ , '''decoder''' , UpperCamelCase__ )
snake_case__ : Optional[Any] = layer_norm
if split_mlp_wi:
snake_case__ : Union[str, Any] = wi[0].T
snake_case__ : int = wi[1].T
else:
snake_case__ : str = wi.T
snake_case__ : Any = wo.T
snake_case__ : str = old['''decoder/decoder_norm/scale''']
snake_case__ : str = old[
'''decoder/relpos_bias/rel_embedding'''
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
snake_case__ : List[Any] = old['''decoder/logits_dense/kernel'''].T
return new
def __UpperCAmelCase ( UpperCamelCase__ :List[Any] , UpperCamelCase__ :bool ) -> str:
snake_case__ : str = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
snake_case__ : str = state_dict['''shared.weight''']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
snake_case__ : Tuple = state_dict['''shared.weight''']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('''Using shared word embeddings as lm_head.''' )
snake_case__ : Optional[int] = state_dict['''shared.weight''']
return state_dict
def __UpperCAmelCase ( UpperCamelCase__ :str , UpperCamelCase__ :Tuple , UpperCamelCase__ :List[str] , UpperCamelCase__ :Dict ) -> Tuple:
snake_case__ : List[str] = checkpoints.load_tax_checkpoint(UpperCamelCase__ )
snake_case__ : Union[str, Any] = convert_tax_to_pytorch(UpperCamelCase__ , num_layers=config.num_layers , is_encoder_only=UpperCamelCase__ )
snake_case__ : List[Any] = make_state_dict(UpperCamelCase__ , UpperCamelCase__ )
model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
def __UpperCAmelCase ( UpperCamelCase__ :Optional[int] , UpperCamelCase__ :List[Any] , UpperCamelCase__ :List[Any] , UpperCamelCase__ :bool = False ) -> Optional[Any]:
snake_case__ : Optional[Any] = TaConfig.from_json_file(UpperCamelCase__ )
print(F'''Building PyTorch model from configuration: {config}''' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
snake_case__ : Tuple = TaEncoderModel(UpperCamelCase__ )
else:
snake_case__ : Optional[Any] = TaForConditionalGeneration(UpperCamelCase__ )
# Load weights from tf checkpoint
load_tax_weights_in_ta(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(UpperCamelCase__ )
# Verify that we can load the checkpoint.
model.from_pretrained(UpperCamelCase__ )
print('''Done''' )
if __name__ == "__main__":
_lowercase : Tuple =argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
)
_lowercase : Optional[Any] =parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
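# Small sketch of the key flattening the converter above relies on: T5X stores
# parameters as a nested dict, which is flattened into '/'-joined paths such as
# 'encoder/layers_0/attention/query/kernel' before the lookup helpers run.
def flatten_params(nested, prefix=""):
    flat = {}
    for key, value in nested.items():
        path = f"{prefix}/{key}" if prefix else key
        if isinstance(value, dict):
            flat.update(flatten_params(value, path))
        else:
            flat[path] = value
    return flat

tree = {"encoder": {"layers_0": {"attention": {"query": {"kernel": [[1.0]]}}}}}
assert "encoder/layers_0/attention/query/kernel" in flatten_params(tree)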
| 574 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase : Any ={
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict =["ConditionalDetrFeatureExtractor"]
_lowercase : Optional[int] =["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[Any] =[
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
_lowercase : Union[str, Any] =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 574 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class _A ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self : Dict ) -> Optional[int]:
__UpperCAmelCase =AutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" , return_dict=__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =AutoTokenizer.from_pretrained("""google/mt5-small""" )
__UpperCAmelCase =tokenizer("""Hello there""" , return_tensors="""pt""" ).input_ids
__UpperCAmelCase =tokenizer("""Hi I am""" , return_tensors="""pt""" ).input_ids
__UpperCAmelCase =model(input_ids.to(__SCREAMING_SNAKE_CASE ) , labels=labels.to(__SCREAMING_SNAKE_CASE ) ).loss
__UpperCAmelCase =-(labels.shape[-1] * loss.item())
__UpperCAmelCase =-84.9_127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
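# The score computed above converts the mean per-token cross-entropy returned by the
# model into a total sequence log-likelihood: score = -(num_label_tokens * mean_loss).
# Tiny numeric illustration with made-up numbers:
mean_loss = 2.5   # average negative log-likelihood per label token
num_tokens = 4    # labels.shape[-1]
sequence_score = -(num_tokens * mean_loss)
assert sequence_score == -10.0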
| 68 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class _snake_case ( a_ ):
SCREAMING_SNAKE_CASE : Dict = (DDPMScheduler,)
def _SCREAMING_SNAKE_CASE ( self , **_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = {
'num_train_timesteps': 10_00,
'beta_start': 0.0_001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**_SCREAMING_SNAKE_CASE )
return config
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_SCREAMING_SNAKE_CASE , beta_end=_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
self.check_over_configs(thresholding=_SCREAMING_SNAKE_CASE )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_SCREAMING_SNAKE_CASE , prediction_type=_SCREAMING_SNAKE_CASE , sample_max_value=_SCREAMING_SNAKE_CASE , )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
for t in [0, 5_00, 9_99]:
self.check_over_forward(time_step=_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.00_979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.02 ) ) < 1e-5
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase = len(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter
lowerCAmelCase = torch.manual_seed(0 )
for t in reversed(range(_SCREAMING_SNAKE_CASE ) ):
# 1. predict noise residual
lowerCAmelCase = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# 2. predict previous mean of sample x_t-1
lowerCAmelCase = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCAmelCase = pred_prev_sample
lowerCAmelCase = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
lowerCAmelCase = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 258.9_606 ) < 1e-2
assert abs(result_mean.item() - 0.3_372 ) < 1e-3
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config(prediction_type='v_prediction' )
lowerCAmelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase = len(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter
lowerCAmelCase = torch.manual_seed(0 )
for t in reversed(range(_SCREAMING_SNAKE_CASE ) ):
# 1. predict noise residual
lowerCAmelCase = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# 2. predict previous mean of sample x_t-1
lowerCAmelCase = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCAmelCase = pred_prev_sample
lowerCAmelCase = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
lowerCAmelCase = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 202.0_296 ) < 1e-2
assert abs(result_mean.item() - 0.2_631 ) < 1e-3
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase = [1_00, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE )
lowerCAmelCase = scheduler.timesteps
for i, timestep in enumerate(_SCREAMING_SNAKE_CASE ):
if i == len(_SCREAMING_SNAKE_CASE ) - 1:
lowerCAmelCase = -1
else:
lowerCAmelCase = timesteps[i + 1]
lowerCAmelCase = scheduler.previous_timestep(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = prev_t.item()
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase = [1_00, 87, 50, 51, 0]
with self.assertRaises(_SCREAMING_SNAKE_CASE , msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase = [1_00, 87, 50, 1, 0]
lowerCAmelCase = len(_SCREAMING_SNAKE_CASE )
with self.assertRaises(_SCREAMING_SNAKE_CASE , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=_SCREAMING_SNAKE_CASE , timesteps=_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            _SCREAMING_SNAKE_CASE , msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE )
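

# A minimal sketch of the denoising loop these tests exercise. The class name ``DDPMScheduler``
# and the ``unet`` model are assumptions for illustration; ``set_timesteps(timesteps=...)`` and
# ``step(...).prev_sample`` are the calls used in the tests above. Timesteps must be a strictly
# descending list:
#
#     scheduler = DDPMScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(timesteps=[100, 87, 50, 1, 0])
#     sample = torch.randn(1, 3, 32, 32)
#     for t in scheduler.timesteps:
#         noise_pred = unet(sample, t)
#         sample = scheduler.step(noise_pred, t, sample).prev_sample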
| 284 | 0 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class __lowerCamelCase ( __snake_case , __snake_case ):
lowerCamelCase_ : Any = 1
@register_to_config
def __init__( self , lowerCamelCase=2000 , lowerCamelCase=0.1 , lowerCamelCase=20 , lowerCamelCase=1e-3 ) -> Optional[Any]:
snake_case_ = None
snake_case_ = None
snake_case_ = None
def lowerCAmelCase_ ( self , lowerCamelCase , lowerCamelCase = None ) -> int:
snake_case_ = torch.linspace(1 , self.config.sampling_eps , lowerCamelCase , device=lowerCamelCase )
def lowerCAmelCase_ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None ) -> int:
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
snake_case_ = (
-0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
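        # Mean/std of the VP-SDE perturbation kernel x_t | x_0 (Song et al., 2021):
        #   mean coefficient = exp(log_mean_coeff), with
        #   log_mean_coeff   = -(1/4) t^2 (beta_max - beta_min) - (1/2) t beta_min
        #   std              = sqrt(1 - exp(2 * log_mean_coeff))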
snake_case_ = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
snake_case_ = std.flatten()
while len(std.shape ) < len(score.shape ):
snake_case_ = std.unsqueeze(-1 )
snake_case_ = -score / std
# compute
snake_case_ = -1.0 / len(self.timesteps )
snake_case_ = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
snake_case_ = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
snake_case_ = beta_t.unsqueeze(-1 )
snake_case_ = -0.5 * beta_t * x
snake_case_ = torch.sqrt(lowerCamelCase )
snake_case_ = drift - diffusion**2 * score
snake_case_ = x + drift * dt
# add noise
snake_case_ = randn_tensor(x.shape , layout=x.layout , generator=lowerCamelCase , device=x.device , dtype=x.dtype )
snake_case_ = x_mean + diffusion * math.sqrt(-dt ) * noise
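        # Together these lines implement one Euler-Maruyama step of the reverse-time SDE:
        # x_mean applies the deterministic reverse drift (-0.5 * beta_t * x - diffusion^2 * score) * dt,
        # then Brownian noise of scale diffusion * sqrt(|dt|) is added (dt < 0: time runs backwards).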
return x, x_mean
def __len__( self ) -> Tuple:
return self.config.num_train_timesteps
| 702 |
from typing import Any


def UpperCamelCase( input_list: list ) -> list[Any]:
    '''Return the mode(s) of ``input_list``, sorted in ascending order.

    >>> UpperCamelCase([2, 3, 4, 5, 3, 4, 2, 5, 2, 2, 4, 2, 2, 2])
    [2]
    >>> UpperCamelCase([1, 1, 2, 2, 3])
    [1, 2]
    >>> UpperCamelCase([])
    []
    '''
    if not input_list:
        return []
    counts = [input_list.count(value) for value in input_list]
    max_count = max(counts)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, count in enumerate(counts) if count == max_count})
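

# A linear-time alternative using collections.Counter; a sketch for comparison only, and the
# name ``mode_counter`` is illustrative rather than part of the original module:
def mode_counter(values: list) -> list[Any]:
    from collections import Counter

    if not values:
        return []
    counts = Counter(values)
    max_count = max(counts.values())
    return sorted(value for value, count in counts.items() if count == max_count)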
if __name__ == "__main__":
import doctest
doctest.testmod()
| 161 | 0 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
lowerCamelCase = 'pt'
elif is_tf_available():
lowerCamelCase = 'tf'
else:
lowerCamelCase = 'jax'
class A ( __a , unittest.TestCase ):
UpperCamelCase__ : Dict =PerceiverTokenizer
UpperCamelCase__ : Optional[Any] =False
def lowerCamelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
super().setUp()
_lowerCamelCase : Union[str, Any] =PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCamelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def lowerCamelCase ( self : List[str] , **lowercase_ : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def lowerCamelCase ( self : Dict , lowercase_ : Tuple , lowercase_ : int=False , lowercase_ : List[Any]=20 , lowercase_ : Optional[Any]=5 ) -> str:
"""simple docstring"""
_lowerCamelCase : Optional[int] =[]
for i in range(len(__lowerCAmelCase ) ):
try:
_lowerCamelCase : Union[str, Any] =tokenizer.decode([i] , clean_up_tokenization_spaces=__lowerCAmelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
_lowerCamelCase : Dict =list(filter(lambda lowercase_ : re.match(R'^[ a-zA-Z]+$' , t[1] ) , __lowerCAmelCase ) )
_lowerCamelCase : int =list(filter(lambda lowercase_ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=__lowerCAmelCase ) , __lowerCAmelCase ) )
if max_length is not None and len(__lowerCAmelCase ) > max_length:
_lowerCamelCase : Tuple =toks[:max_length]
if min_length is not None and len(__lowerCAmelCase ) < min_length and len(__lowerCAmelCase ) > 0:
while len(__lowerCAmelCase ) < min_length:
_lowerCamelCase : List[Any] =toks + toks
# toks_str = [t[1] for t in toks]
_lowerCamelCase : Any =[t[0] for t in toks]
# Ensure consistency
_lowerCamelCase : Optional[Any] =tokenizer.decode(__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase )
if " " not in output_txt and len(__lowerCAmelCase ) > 1:
_lowerCamelCase : Union[str, Any] =(
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__lowerCAmelCase )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__lowerCAmelCase )
)
if with_prefix_space:
_lowerCamelCase : Dict =' ' + output_txt
_lowerCamelCase : Dict =tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
return output_txt, output_ids
def lowerCamelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase : Optional[Any] =self.perceiver_tokenizer
_lowerCamelCase : Union[str, Any] ='Unicode €.'
_lowerCamelCase : Union[str, Any] =tokenizer(__lowerCAmelCase )
_lowerCamelCase : List[str] =[4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded['input_ids'] , __lowerCAmelCase )
# decoding
_lowerCamelCase : Optional[int] =tokenizer.decode(__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , '[CLS]Unicode €.[SEP]' )
_lowerCamelCase : Dict =tokenizer('e è é ê ë' )
_lowerCamelCase : int =[4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded['input_ids'] , __lowerCAmelCase )
# decoding
_lowerCamelCase : Any =tokenizer.decode(__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' )
def lowerCamelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
_lowerCamelCase : Optional[int] =self.perceiver_tokenizer
_lowerCamelCase : List[str] =['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
_lowerCamelCase : Union[str, Any] =[4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
_lowerCamelCase : List[Any] =tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , return_tensors=__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
if FRAMEWORK != "jax":
_lowerCamelCase : Any =list(batch.input_ids.numpy()[0] )
else:
_lowerCamelCase : Dict =list(batch.input_ids.tolist()[0] )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def lowerCamelCase ( self : str ) -> str:
"""simple docstring"""
_lowerCamelCase : str =self.perceiver_tokenizer
_lowerCamelCase : Dict =['A long paragraph for summarization.', 'Another paragraph for summarization.']
_lowerCamelCase : List[str] =tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , return_tensors=__lowerCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , __lowerCAmelCase )
self.assertIn('attention_mask' , __lowerCAmelCase )
self.assertNotIn('decoder_input_ids' , __lowerCAmelCase )
self.assertNotIn('decoder_attention_mask' , __lowerCAmelCase )
def lowerCamelCase ( self : Tuple ) -> int:
"""simple docstring"""
_lowerCamelCase : int =self.perceiver_tokenizer
_lowerCamelCase : List[Any] =[
'Summary of the text.',
'Another summary.',
]
_lowerCamelCase : Optional[Any] =tokenizer(
text_target=__lowerCAmelCase , max_length=32 , padding='max_length' , truncation=__lowerCAmelCase , return_tensors=__lowerCAmelCase )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def lowerCamelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase : Any =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
_lowerCamelCase : Any =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_lowerCamelCase : Optional[Any] =tempfile.mkdtemp()
_lowerCamelCase : Tuple =' He is very happy, UNwant\u00E9d,running'
_lowerCamelCase : int =tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
tokenizer.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : List[Any] =tokenizer.__class__.from_pretrained(__lowerCAmelCase )
_lowerCamelCase : str =after_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
shutil.rmtree(__lowerCAmelCase )
_lowerCamelCase : str =self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_lowerCamelCase : Any =tempfile.mkdtemp()
_lowerCamelCase : Union[str, Any] =' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
_lowerCamelCase : str =tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
_lowerCamelCase : Union[str, Any] =tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
tokenizer.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : Tuple =tokenizer.__class__.from_pretrained(__lowerCAmelCase )
_lowerCamelCase : str =after_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
_lowerCamelCase : List[Any] =tokenizer.__class__.from_pretrained(__lowerCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(__lowerCAmelCase )
def lowerCamelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] =[]
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__lowerCAmelCase )
with open(os.path.join(__lowerCAmelCase , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
_lowerCamelCase : int =json.load(__lowerCAmelCase )
with open(os.path.join(__lowerCAmelCase , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
_lowerCamelCase : Optional[Any] =json.load(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] =[F'''<extra_id_{i}>''' for i in range(125 )]
_lowerCamelCase : Tuple =added_tokens_extra_ids + [
'an_additional_special_token'
]
_lowerCamelCase : Dict =added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(__lowerCAmelCase , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
with open(os.path.join(__lowerCAmelCase , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_lowerCamelCase : List[Any] =tokenizer_class.from_pretrained(
__lowerCAmelCase , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_lowerCamelCase : List[Any] =added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=__lowerCAmelCase )]
_lowerCamelCase : Any =tokenizer_class.from_pretrained(
__lowerCAmelCase , additional_special_tokens=__lowerCAmelCase , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def lowerCamelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase : int =self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , '�' )
def lowerCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
def lowerCamelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
pass
def lowerCamelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
pass
def lowerCamelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
pass
def lowerCamelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
_lowerCamelCase : List[str] =self.get_tokenizers(fast=__lowerCAmelCase , do_lower_case=__lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_lowerCamelCase : List[str] =['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
_lowerCamelCase : Optional[Any] =tokenizer.convert_tokens_to_string(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
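

# A rough sketch of the byte-level scheme the expected ids above follow: each UTF-8 byte is
# offset by the six special tokens ([PAD], [BOS], [EOS], [MASK], [CLS], [SEP]). The helper
# below is illustrative only and not part of the test suite.
def _toy_perceiver_encode(text: str) -> list:
    return [4] + [byte + 6 for byte in text.encode("utf-8")] + [5]  # [CLS] ... [SEP]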
| 464 |
'''simple docstring'''
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
    # Create universe of discourse in Python using np.linspace()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = 1 - µA(x)
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), 1 - µB(x))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged))[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
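
    # Worked example at one point (illustrative values): if µA(x) = 0.6 and µB(x) = 0.4 then
    #   union = max(0.6, 0.4) = 0.6          intersection = min(0.6, 0.4) = 0.4
    #   complement_a = 1 - 0.6 = 0.4         algebraic sum = 0.6 + 0.4 - 0.24 = 0.76
    #   bounded sum = min(1, 1.0) = 1.0      bounded difference = max(0, 0.2) = 0.2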
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('''Young''')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('''Middle aged''')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('''union''')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('''intersection''')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('''complement_a''')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('''difference a/b''')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('''alg_sum''')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('''alg_product''')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('''bdd_sum''')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('''bdd_difference''')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 309 | 0 |
'''simple docstring'''
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
__A : Union[str, Any] = """"""
if version.parse(importlib_metadata.version("""jiwer""")) < version.parse("""2.3.0"""):
class lowercase ( tr.AbstractTransform ):
'''simple docstring'''
def __init__( self : str , __lowerCamelCase : str = " " ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase__ = sentence_delimiter
def a__ ( self : Any , __lowerCamelCase : str ) -> Any:
'''simple docstring'''
return list(__lowerCamelCase )
def a__ ( self : str , __lowerCamelCase : List[str] ) -> Dict:
'''simple docstring'''
lowerCamelCase__ = []
for sent_idx, sentence in enumerate(__lowerCamelCase ):
chars.extend(self.process_string(__lowerCamelCase ) )
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(__lowerCamelCase ) - 1:
chars.append(self.sentence_delimiter )
return chars
__A : Any = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
__A : Optional[int] = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
__A : int = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
__A : int = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on characters instead of words. Please refer to the docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated with the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
"""
__A : str = """
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
    predictions: list of transcriptions to score.
    concatenate_texts: Whether or not to concatenate sentences before evaluation; set to True for a more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> cer = datasets.load_metric(\"cer\")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
'''simple docstring'''
def a__ ( self : str ) -> List[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
"https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
] , )
def a__ ( self : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : int , __lowerCamelCase : Any=False ) -> Dict:
'''simple docstring'''
if concatenate_texts:
return jiwer.compute_measures(
__lowerCamelCase , __lowerCamelCase , truth_transform=__lowerCamelCase , hypothesis_transform=__lowerCamelCase , )["wer"]
lowerCamelCase__ = 0
lowerCamelCase__ = 0
for prediction, reference in zip(__lowerCamelCase , __lowerCamelCase ):
lowerCamelCase__ = jiwer.compute_measures(
__lowerCamelCase , __lowerCamelCase , truth_transform=__lowerCamelCase , hypothesis_transform=__lowerCamelCase , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
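

# A minimal pure-Python reference for the formula above; a sketch only (``_levenshtein`` is a
# hypothetical helper, not part of this metric). The character edit distance counts exactly
# S + D + I, so for one pair CER = _levenshtein(reference, prediction) / len(reference), and
# the corpus score aggregates the sums over all pairs as above.
def _levenshtein(reference: str, prediction: str) -> int:
    previous = list(range(len(prediction) + 1))
    for i, ref_char in enumerate(reference, 1):
        current = [i]
        for j, pred_char in enumerate(prediction, 1):
            current.append(
                min(
                    previous[j] + 1,  # deletion
                    current[j - 1] + 1,  # insertion
                    previous[j - 1] + (ref_char != pred_char),  # substitution
                )
            )
        previous = current
    return previous[-1]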
| 187 |
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class lowercase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int=1 , __lowerCamelCase : Tuple=False , **__lowerCamelCase : Dict ) -> str:
'''simple docstring'''
super().__init__(**__lowerCamelCase )
lowerCamelCase__ = vocab_size
lowerCamelCase__ = d_embed
lowerCamelCase__ = d_proj
lowerCamelCase__ = cutoffs + [vocab_size]
lowerCamelCase__ = [0] + self.cutoffs
lowerCamelCase__ = div_val
lowerCamelCase__ = self.cutoffs[0]
lowerCamelCase__ = len(self.cutoffs ) - 1
lowerCamelCase__ = self.shortlist_size + self.n_clusters
lowerCamelCase__ = keep_order
lowerCamelCase__ = []
lowerCamelCase__ = []
def a__ ( self : Optional[int] , __lowerCamelCase : str ) -> List[str]:
'''simple docstring'''
if self.n_clusters > 0:
lowerCamelCase__ = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer="zeros" , trainable=__lowerCamelCase , name="cluster_weight" )
lowerCamelCase__ = self.add_weight(
shape=(self.n_clusters,) , initializer="zeros" , trainable=__lowerCamelCase , name="cluster_bias" )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
lowerCamelCase__ = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer="zeros" , trainable=__lowerCamelCase , name=f'''out_projs_._{i}''' , )
self.out_projs.append(__lowerCamelCase )
else:
self.out_projs.append(__lowerCamelCase )
lowerCamelCase__ = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer="zeros" , trainable=__lowerCamelCase , name=f'''out_layers_._{i}_._weight''' , )
lowerCamelCase__ = self.add_weight(
shape=(self.vocab_size,) , initializer="zeros" , trainable=__lowerCamelCase , name=f'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
lowerCamelCase__ , lowerCamelCase__ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCamelCase__ = self.d_embed // (self.div_val**i)
lowerCamelCase__ = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer="zeros" , trainable=__lowerCamelCase , name=f'''out_projs_._{i}''' )
self.out_projs.append(__lowerCamelCase )
lowerCamelCase__ = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer="zeros" , trainable=__lowerCamelCase , name=f'''out_layers_._{i}_._weight''' , )
lowerCamelCase__ = self.add_weight(
shape=(r_idx - l_idx,) , initializer="zeros" , trainable=__lowerCamelCase , name=f'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
super().build(__lowerCamelCase )
@staticmethod
def a__ ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : List[str]=None ) -> str:
'''simple docstring'''
lowerCamelCase__ = x
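        # ``x`` carries two leading axes and the model width last; the optional projection maps
        # the model width to the embedding width, and "ibd,nd->ibn" then turns the feature axis
        # into per-token logits over this vocabulary chunk.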
if proj is not None:
lowerCamelCase__ = tf.einsum("ibd,ed->ibe" , __lowerCamelCase , __lowerCamelCase )
return tf.einsum("ibd,nd->ibn" , __lowerCamelCase , __lowerCamelCase ) + b
@staticmethod
def a__ ( __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = shape_list(__lowerCamelCase )
lowerCamelCase__ = tf.range(lp_size[0] , dtype=target.dtype )
lowerCamelCase__ = tf.stack([r, target] , 1 )
return tf.gather_nd(__lowerCamelCase , __lowerCamelCase )
def a__ ( self : Any , __lowerCamelCase : int , __lowerCamelCase : Tuple , __lowerCamelCase : str=True , __lowerCamelCase : Tuple=False ) -> int:
'''simple docstring'''
lowerCamelCase__ = 0
if self.n_clusters == 0:
lowerCamelCase__ = self._logit(__lowerCamelCase , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
lowerCamelCase__ = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=__lowerCamelCase , logits=__lowerCamelCase )
lowerCamelCase__ = tf.nn.log_softmax(__lowerCamelCase , axis=-1 )
else:
lowerCamelCase__ = shape_list(__lowerCamelCase )
lowerCamelCase__ = []
lowerCamelCase__ = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
lowerCamelCase__ , lowerCamelCase__ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
lowerCamelCase__ = (target >= l_idx) & (target < r_idx)
lowerCamelCase__ = tf.where(__lowerCamelCase )
lowerCamelCase__ = tf.boolean_mask(__lowerCamelCase , __lowerCamelCase ) - l_idx
if self.div_val == 1:
lowerCamelCase__ = self.out_layers[0][0][l_idx:r_idx]
lowerCamelCase__ = self.out_layers[0][1][l_idx:r_idx]
else:
lowerCamelCase__ = self.out_layers[i][0]
lowerCamelCase__ = self.out_layers[i][1]
if i == 0:
lowerCamelCase__ = tf.concat([cur_W, self.cluster_weight] , 0 )
lowerCamelCase__ = tf.concat([cur_b, self.cluster_bias] , 0 )
lowerCamelCase__ = self._logit(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , self.out_projs[0] )
lowerCamelCase__ = tf.nn.log_softmax(__lowerCamelCase )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
lowerCamelCase__ = tf.boolean_mask(__lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ = self._gather_logprob(__lowerCamelCase , __lowerCamelCase )
else:
lowerCamelCase__ = self._logit(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , self.out_projs[i] )
lowerCamelCase__ = tf.nn.log_softmax(__lowerCamelCase )
lowerCamelCase__ = self.cutoffs[0] + i - 1 # No probability for the head cluster
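                    # Adaptive-softmax factorization: for a word w in tail cluster i,
                    #   log p(w | h) = log p(cluster_i | h) + log p(w | cluster_i, h),
                    # i.e. the head log-prob of the cluster plus the tail log-prob of the word.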
lowerCamelCase__ = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(__lowerCamelCase )
if target is not None:
lowerCamelCase__ = tf.boolean_mask(__lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ = tf.boolean_mask(__lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ = self._gather_logprob(__lowerCamelCase , __lowerCamelCase )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(__lowerCamelCase , -cur_logprob , shape_list(__lowerCamelCase ) )
lowerCamelCase__ = tf.concat(__lowerCamelCase , axis=-1 )
if target is not None:
if return_mean:
lowerCamelCase__ = tf.reduce_mean(__lowerCamelCase )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(__lowerCamelCase )
            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
self.add_metric(__lowerCamelCase , name=self.name , aggregation="mean" if return_mean else "" )
return out
| 187 | 1 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
a__ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE_ ( _UpperCamelCase ):
"""simple docstring"""
def __init__( self : int , *lowerCAmelCase : List[str] , **lowerCAmelCase : Union[str, Any] ) -> None:
"""simple docstring"""
warnings.warn(
"""The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use PoolFormerImageProcessor instead.""" , SCREAMING_SNAKE_CASE_ , )
super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
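

# Migration sketch for downstream code (the checkpoint name is an illustrative example, not
# something this file references):
#
#     from transformers import PoolFormerImageProcessor
#
#     image_processor = PoolFormerImageProcessor.from_pretrained("sail/poolformer_s12")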
| 279 |
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class UpperCAmelCase_ :
"""simple docstring"""
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
return None
class UpperCAmelCase_ :
"""simple docstring"""
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
return None
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Any =[
# (model_name, model_kwargs)
('bert-base-cased', {}),
('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def UpperCAmelCase ( self ) -> List[Any]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(SCREAMING_SNAKE_CASE_ , '''tf''' , 12 , **SCREAMING_SNAKE_CASE_ )
@require_torch
@slow
def UpperCAmelCase ( self ) -> Optional[int]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(SCREAMING_SNAKE_CASE_ , '''pt''' , 12 , **SCREAMING_SNAKE_CASE_ )
@require_torch
@slow
def UpperCAmelCase ( self ) -> int:
from transformers import BertModel
UpperCamelCase :int = ['''[UNK]''', '''[SEP]''', '''[CLS]''', '''[PAD]''', '''[MASK]''', '''some''', '''other''', '''words''']
with NamedTemporaryFile(mode='''w+t''' ) as vocab_file:
vocab_file.write('''\n'''.join(SCREAMING_SNAKE_CASE_ ) )
vocab_file.flush()
UpperCamelCase :Tuple = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
UpperCamelCase :Union[str, Any] = BertModel(BertConfig(vocab_size=len(SCREAMING_SNAKE_CASE_ ) ) )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
self._test_export(SCREAMING_SNAKE_CASE_ , '''pt''' , 12 , SCREAMING_SNAKE_CASE_ )
@require_tf
@slow
def UpperCAmelCase ( self ) -> str:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
UpperCamelCase :Tuple = self._test_export(SCREAMING_SNAKE_CASE_ , '''tf''' , 12 , **SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[Any] = quantize(Path(SCREAMING_SNAKE_CASE_ ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(SCREAMING_SNAKE_CASE_ ).stat().st_size:
self.fail('''Quantized model is bigger than initial ONNX model''' )
@require_torch
@slow
def UpperCAmelCase ( self ) -> Optional[Any]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
UpperCamelCase :str = self._test_export(SCREAMING_SNAKE_CASE_ , '''pt''' , 12 , **SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = quantize(SCREAMING_SNAKE_CASE_ )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(SCREAMING_SNAKE_CASE_ ).stat().st_size:
self.fail('''Quantized model is bigger than initial ONNX model''' )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
try:
# Compute path
with TemporaryDirectory() as tempdir:
UpperCamelCase :Union[str, Any] = Path(SCREAMING_SNAKE_CASE_ ).joinpath('''model.onnx''' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
return path
except Exception as e:
self.fail(SCREAMING_SNAKE_CASE_ )
@require_torch
@require_tokenizers
@slow
def UpperCAmelCase ( self ) -> List[str]:
from transformers import BertModel
UpperCamelCase :List[Any] = BertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) )
UpperCamelCase :int = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' )
self._test_infer_dynamic_axis(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''pt''' )
@require_tf
@require_tokenizers
@slow
def UpperCAmelCase ( self ) -> List[Any]:
from transformers import TFBertModel
UpperCamelCase :Optional[Any] = TFBertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) )
UpperCamelCase :Optional[Any] = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' )
self._test_infer_dynamic_axis(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''tf''' )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCamelCase :Tuple = FeatureExtractionPipeline(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''output_0''', '''output_1''']
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase :List[Any] = infer_shapes(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Assert all variables are present
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , SCREAMING_SNAKE_CASE_ )
self.assertSequenceEqual(variable_names[3:] , SCREAMING_SNAKE_CASE_ )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: '''batch''', 1: '''sequence'''} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['''output_0'''] , {0: '''batch''', 1: '''sequence'''} )
self.assertDictEqual(shapes['''output_1'''] , {0: '''batch'''} )
def UpperCAmelCase ( self ) -> int:
UpperCamelCase :int = ['''input_ids''', '''attention_mask''', '''token_type_ids''']
UpperCamelCase :Tuple = {'''input_ids''': [1, 2, 3, 4], '''attention_mask''': [0, 0, 0, 0], '''token_type_ids''': [1, 1, 1, 1]}
UpperCamelCase , UpperCamelCase :Any = ensure_valid_input(FuncContiguousArgs() , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(SCREAMING_SNAKE_CASE_ ) , set(SCREAMING_SNAKE_CASE_ ) )
        # Parameters should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(SCREAMING_SNAKE_CASE_ , (tokens['''input_ids'''], tokens['''token_type_ids'''], tokens['''attention_mask''']) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
UpperCamelCase , UpperCamelCase :Tuple = ensure_valid_input(FuncNonContiguousArgs() , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        # Should have exactly one arg (all args before the one not provided, "some_other_args")
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 1 )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens['''input_ids'''] )
self.assertEqual(ordered_input_names[0] , '''input_ids''' )
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :str = generate_identified_filename(Path('''/home/something/my_fake_model.onnx''' ) , '''-test''' )
self.assertEqual('''/home/something/my_fake_model-test.onnx''' , generated.as_posix() )
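

# Typical programmatic use of the helpers exercised above (a sketch; the output path is a
# placeholder and the call downloads the named checkpoint):
#
#     from pathlib import Path
#     from transformers.convert_graph_to_onnx import convert, quantize
#
#     convert(framework="pt", model="bert-base-cased", output=Path("onnx/model.onnx"), opset=12)
#     quantized_path = quantize(Path("onnx/model.onnx"))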
| 658 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
A_ = {
"configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
"processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSpeech2TextForConditionalGeneration",
"TFSpeech2TextModel",
"TFSpeech2TextPreTrainedModel",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Speech2TextForConditionalGeneration",
"Speech2TextModel",
"Speech2TextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
A_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
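

# The pattern above defers heavy imports until an attribute is first accessed; a stripped-down
# sketch of the idea (not the actual transformers implementation):
#
#     import importlib
#
#     class LazyModule:
#         def __init__(self, name, import_structure):
#             self._name = name
#             self._import_structure = import_structure
#
#         def __getattr__(self, attr):
#             for submodule, symbols in self._import_structure.items():
#                 if attr in symbols:
#                     module = importlib.import_module(f".{submodule}", self._name)
#                     return getattr(module, attr)
#             raise AttributeError(attr)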
| 721 |
'''simple docstring'''
def A_ ( snake_case = 100 ):
SCREAMING_SNAKE_CASE:Dict = 0
SCREAMING_SNAKE_CASE:Optional[int] = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(f'''{solution() = }''')
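
# Worked example for n = 10: (1 + 2 + ... + 10)^2 = 55^2 = 3025 and
# 1^2 + 2^2 + ... + 10^2 = 385, so solution(10) returns 3025 - 385 = 2640.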
| 465 | 0 |
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class __lowercase ( unittest.TestCase ):
def __init__( self : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : Tuple=1_3 , __lowerCamelCase : Optional[int]=3_0 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : str=3 , __lowerCamelCase : str=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Tuple=3_2 , __lowerCamelCase : Dict=5 , __lowerCamelCase : str=4 , __lowerCamelCase : Tuple=3_7 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Any=0.1 , __lowerCamelCase : Dict=1_0 , __lowerCamelCase : Union[str, Any]=0.02 , ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = image_size
UpperCAmelCase = patch_size
UpperCAmelCase = num_channels
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase = (image_size // patch_size) ** 2
UpperCAmelCase = num_patches + 1
def _lowercase ( self : Any ) -> Dict:
"""simple docstring"""
UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , )
return config, pixel_values
def _lowercase ( self : Optional[int] , __lowerCamelCase : Any , __lowerCamelCase : Tuple ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = FlaxViTModel(config=__lowerCamelCase )
UpperCAmelCase = model(__lowerCamelCase )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase = (self.image_size, self.image_size)
UpperCAmelCase = (self.patch_size, self.patch_size)
UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def _lowercase ( self : List[str] , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = self.type_sequence_label_size
UpperCAmelCase = FlaxViTForImageClassification(config=__lowerCamelCase )
UpperCAmelCase = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase = 1
UpperCAmelCase = FlaxViTForImageClassification(__lowerCamelCase )
UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase = model(__lowerCamelCase )
def _lowercase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = self.prepare_config_and_inputs()
        UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_flax
class __lowercase ( __snake_case , unittest.TestCase ):
UpperCamelCase = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def _lowercase ( self : Union[str, Any] ) -> None:
"""simple docstring"""
UpperCAmelCase = FlaxViTModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=3_7 )
def _lowercase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowercase ( self : List[str] ) -> int:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _lowercase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
def _lowercase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(__lowerCamelCase )
UpperCAmelCase = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def _lowercase ( self : Tuple ) -> Tuple:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
UpperCAmelCase = model_class(__lowerCamelCase )
@jax.jit
def model_jitted(__lowerCamelCase : int , **__lowerCamelCase : Optional[Any] ):
return model(pixel_values=__lowerCamelCase , **__lowerCamelCase )
with self.subTest("""JIT Enabled""" ):
UpperCAmelCase = model_jitted(**__lowerCamelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
UpperCAmelCase = model_jitted(**__lowerCamelCase ).to_tuple()
self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) )
for jitted_output, output in zip(__lowerCamelCase , __lowerCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _lowercase ( self : Tuple ) -> Any:
"""simple docstring"""
for model_class_name in self.all_model_classes:
UpperCAmelCase = model_class_name.from_pretrained("""google/vit-base-patch16-224""" )
UpperCAmelCase = model(np.ones((1, 3, 2_2_4, 2_2_4) ) )
self.assertIsNotNone(__lowerCamelCase )
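

# Sanity check of the shape arithmetic used by the tester above: with image_size=30 and
# patch_size=2, num_patches = (30 // 2) ** 2 = 225, so the expected sequence length is
# 225 + 1 = 226 (the extra position is the [CLS] token).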
| 377 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
__a = datasets.logging.get_logger(__name__)
__a = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
__a = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6   Parse bit   This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11  Named Entities  This column identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
"""
__a = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_="dummy_doc" ) ->Optional[Any]:
UpperCAmelCase = {doc: key_lines}
UpperCAmelCase = {doc: sys_lines}
UpperCAmelCase = {}
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase , UpperCAmelCase = reader.get_doc_mentions(lowerCAmelCase_ , key_doc_lines[doc] , lowerCAmelCase_ )
key_singletons_num += singletons_num
if NP_only or min_span:
UpperCAmelCase = reader.set_annotated_parse_trees(lowerCAmelCase_ , key_doc_lines[doc] , lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase , UpperCAmelCase = reader.get_doc_mentions(lowerCAmelCase_ , sys_doc_lines[doc] , lowerCAmelCase_ )
sys_singletons_num += singletons_num
if NP_only or min_span:
UpperCAmelCase = reader.set_annotated_parse_trees(lowerCAmelCase_ , key_doc_lines[doc] , lowerCAmelCase_ , lowerCAmelCase_ )
if remove_nested:
UpperCAmelCase , UpperCAmelCase = reader.remove_nested_coref_mentions(lowerCAmelCase_ , lowerCAmelCase_ )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
UpperCAmelCase , UpperCAmelCase = reader.remove_nested_coref_mentions(lowerCAmelCase_ , lowerCAmelCase_ )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
UpperCAmelCase = reader.get_mention_assignments(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase = reader.get_mention_assignments(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"""Number of removed nested coreferring mentions in the key """
F"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" )
logger.info(
"""Number of resulting singleton clusters in the key """
F"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" )
if not keep_singletons:
logger.info(
F"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """
"""files, respectively""" )
return doc_coref_infos
def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) ->int:
UpperCAmelCase = get_coref_infos(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase = {}
UpperCAmelCase = 0
UpperCAmelCase = 0
for name, metric in metrics:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = evaluator.evaluate_documents(lowerCAmelCase_ , lowerCAmelCase_ , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F"""{name}/recall""": recall, F"""{name}/precision""": precision, F"""{name}/f1""": fa} )
logger.info(
name.ljust(1_0 ) , F"""Recall: {recall * 1_0_0:.2f}""" , F""" Precision: {precision * 1_0_0:.2f}""" , F""" F1: {fa * 1_0_0:.2f}""" , )
if conll_subparts_num == 3:
UpperCAmelCase = (conll / 3) * 1_0_0
logger.info(F"""CoNLL score: {conll:.2f}""" )
output_scores.update({"""conll_score""": conll} )
return output_scores
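# Note (added comment): `conll` above accumulates the F1 of MUC, B-cubed and CEAFe
# (`conll_subparts_num` reaches 3 exactly when all three metrics were requested), so
# `(conll / 3) * 100` is the standard CoNLL-2012 average score on a 0-100 scale.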
def _UpperCamelCase ( lowerCAmelCase_ ) ->List[Any]:
UpperCAmelCase = False
for line in key_lines:
if not line.startswith("""#""" ):
if len(line.split() ) > 6:
UpperCAmelCase = line.split()[5]
if not parse_col == "-":
UpperCAmelCase = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase ( datasets.Metric ):
def _lowercase ( self : int ) -> str:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
] , )
def _lowercase ( self : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Any=False , __lowerCamelCase : List[Any]=False , __lowerCamelCase : List[Any]=False ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = [
("""mentions""", evaluator.mentions),
("""muc""", evaluator.muc),
("""bcub""", evaluator.b_cubed),
("""ceafe""", evaluator.ceafe),
("""lea""", evaluator.lea),
]
if min_span:
UpperCAmelCase = util.check_gold_parse_annotation(__lowerCamelCase )
if not has_gold_parse:
raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
UpperCAmelCase = evaluate(
key_lines=__lowerCamelCase , sys_lines=__lowerCamelCase , metrics=__lowerCamelCase , NP_only=__lowerCamelCase , remove_nested=__lowerCamelCase , keep_singletons=__lowerCamelCase , min_span=__lowerCamelCase , )
return score
| 377 | 1 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
__lowerCamelCase = logging.get_logger(__name__)
# General docstring
__lowerCamelCase = 'ResNetConfig'
# Base docstring
__lowerCamelCase = 'microsoft/resnet-50'
__lowerCamelCase = [1, 2_048, 7, 7]
# Image classification docstring
__lowerCamelCase = 'microsoft/resnet-50'
__lowerCamelCase = 'tiger cat'
__lowerCamelCase = [
'microsoft/resnet-50',
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowercase , lowercase , lowercase = 3 , lowercase = 1 , lowercase = "relu" ) -> str:
super().__init__()
_a : str = nn.Convad(
lowercase , lowercase , kernel_size=lowercase , stride=lowercase , padding=kernel_size // 2 , bias=lowercase )
_a : Optional[Any] = nn.BatchNormad(lowercase )
_a : int = ACTaFN[activation] if activation is not None else nn.Identity()
def snake_case__( self , lowercase ) -> Tensor:
_a : Union[str, Any] = self.convolution(lowercase )
_a : List[str] = self.normalization(lowercase )
_a : List[str] = self.activation(lowercase )
return hidden_state
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowercase ) -> Optional[int]:
super().__init__()
_a : int = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
_a : Union[str, Any] = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
_a : Union[str, Any] = config.num_channels
def snake_case__( self , lowercase ) -> Tensor:
_a : Any = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
_a : Any = self.embedder(lowercase )
_a : Optional[int] = self.pooler(lowercase )
return embedding
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowercase , lowercase , lowercase = 2 ) -> Dict:
super().__init__()
_a : str = nn.Convad(lowercase , lowercase , kernel_size=1 , stride=lowercase , bias=lowercase )
_a : Union[str, Any] = nn.BatchNormad(lowercase )
def snake_case__( self , lowercase ) -> Tensor:
_a : Optional[int] = self.convolution(lowercase )
_a : Any = self.normalization(lowercase )
return hidden_state
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowercase , lowercase , lowercase = 1 , lowercase = "relu" ) -> List[str]:
super().__init__()
_a : List[str] = in_channels != out_channels or stride != 1
_a : List[Any] = (
ResNetShortCut(lowercase , lowercase , stride=lowercase ) if should_apply_shortcut else nn.Identity()
)
_a : List[str] = nn.Sequential(
ResNetConvLayer(lowercase , lowercase , stride=lowercase ) , ResNetConvLayer(lowercase , lowercase , activation=lowercase ) , )
_a : Dict = ACTaFN[activation]
def snake_case__( self , lowercase ) -> Optional[int]:
_a : List[Any] = hidden_state
_a : Optional[Any] = self.layer(lowercase )
_a : int = self.shortcut(lowercase )
hidden_state += residual
_a : Dict = self.activation(lowercase )
return hidden_state
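# Note (added comment): the `hidden_state += residual` above is the classic residual
# connection y = F(x) + x from the ResNet paper; the shortcut branch applies a strided
# 1x1 projection only when the channel count or stride changes, and is the identity
# otherwise.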
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowercase , lowercase , lowercase = 1 , lowercase = "relu" , lowercase = 4 ) -> Dict:
super().__init__()
_a : Union[str, Any] = in_channels != out_channels or stride != 1
_a : Union[str, Any] = out_channels // reduction
_a : List[str] = (
ResNetShortCut(lowercase , lowercase , stride=lowercase ) if should_apply_shortcut else nn.Identity()
)
_a : Dict = nn.Sequential(
ResNetConvLayer(lowercase , lowercase , kernel_size=1 ) , ResNetConvLayer(lowercase , lowercase , stride=lowercase ) , ResNetConvLayer(lowercase , lowercase , kernel_size=1 , activation=lowercase ) , )
_a : List[str] = ACTaFN[activation]
def snake_case__( self , lowercase ) -> str:
_a : List[str] = hidden_state
_a : Optional[int] = self.layer(lowercase )
_a : Any = self.shortcut(lowercase )
hidden_state += residual
_a : Union[str, Any] = self.activation(lowercase )
return hidden_state
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowercase , lowercase , lowercase , lowercase = 2 , lowercase = 2 , ) -> Optional[int]:
super().__init__()
_a : List[str] = ResNetBottleNeckLayer if config.layer_type == '''bottleneck''' else ResNetBasicLayer
_a : str = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(lowercase , lowercase , stride=lowercase , activation=config.hidden_act ) , *[layer(lowercase , lowercase , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def snake_case__( self , lowercase ) -> Tensor:
_a : Optional[int] = input
for layer in self.layers:
_a : Any = layer(lowercase )
return hidden_state
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowercase ) -> Any:
super().__init__()
_a : Tuple = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
lowercase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
_a : str = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowercase , config.depths[1:] ):
self.stages.append(ResNetStage(lowercase , lowercase , lowercase , depth=lowercase ) )
def snake_case__( self , lowercase , lowercase = False , lowercase = True ) -> BaseModelOutputWithNoAttention:
_a : str = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_a : Dict = hidden_states + (hidden_state,)
_a : List[str] = stage_module(lowercase )
if output_hidden_states:
_a : Tuple = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=lowercase , hidden_states=lowercase , )
class UpperCamelCase_ ( UpperCamelCase ):
lowercase = ResNetConfig
lowercase = '''resnet'''
lowercase = '''pixel_values'''
lowercase = True
def snake_case__( self , lowercase ) -> Any:
if isinstance(lowercase , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='''fan_out''' , nonlinearity='''relu''' )
elif isinstance(lowercase , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def snake_case__( self , lowercase , lowercase=False ) -> int:
if isinstance(lowercase , lowercase ):
_a : List[str] = value
__lowerCamelCase = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
__lowerCamelCase = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
'''The bare ResNet model outputting raw features without any specific head on top.''' , UpperCamelCase , )
class UpperCamelCase_ ( UpperCamelCase ):
def __init__( self , lowercase ) -> int:
super().__init__(lowercase )
_a : Any = config
_a : Optional[int] = ResNetEmbeddings(lowercase )
_a : Any = ResNetEncoder(lowercase )
_a : Dict = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def snake_case__( self , lowercase , lowercase = None , lowercase = None ) -> BaseModelOutputWithPoolingAndNoAttention:
_a : List[str] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_a : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
_a : Optional[Any] = self.embedder(lowercase )
_a : Tuple = self.encoder(
lowercase , output_hidden_states=lowercase , return_dict=lowercase )
_a : str = encoder_outputs[0]
_a : str = self.pooler(lowercase )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowercase , pooler_output=lowercase , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'''
ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , UpperCamelCase , )
class UpperCamelCase_ ( UpperCamelCase ):
def __init__( self , lowercase ) -> str:
super().__init__(lowercase )
_a : str = config.num_labels
_a : List[str] = ResNetModel(lowercase )
# classification head
_a : Optional[int] = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def snake_case__( self , lowercase = None , lowercase = None , lowercase = None , lowercase = None , ) -> ImageClassifierOutputWithNoAttention:
_a : Dict = return_dict if return_dict is not None else self.config.use_return_dict
_a : str = self.resnet(lowercase , output_hidden_states=lowercase , return_dict=lowercase )
_a : int = outputs.pooler_output if return_dict else outputs[1]
_a : str = self.classifier(lowercase )
_a : Dict = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_a : Any = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_a : Optional[Any] = '''single_label_classification'''
else:
_a : Optional[Any] = '''multi_label_classification'''
if self.config.problem_type == "regression":
_a : Optional[Any] = MSELoss()
if self.num_labels == 1:
_a : Union[str, Any] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_a : List[str] = loss_fct(lowercase , lowercase )
elif self.config.problem_type == "single_label_classification":
_a : str = CrossEntropyLoss()
_a : Any = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_a : List[Any] = BCEWithLogitsLoss()
_a : List[Any] = loss_fct(lowercase , lowercase )
if not return_dict:
_a : str = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowercase , logits=lowercase , hidden_states=outputs.hidden_states )
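# Note (added comment): the `problem_type` dispatch in the forward pass above follows
# the usual transformers convention - MSELoss for regression, CrossEntropyLoss for
# single-label classification and BCEWithLogitsLoss for multi-label classification,
# inferred from `num_labels` and the dtype of `labels` when not set explicitly.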
@add_start_docstrings(
'''
ResNet backbone, to be used with frameworks like DETR and MaskFormer.
''' , UpperCamelCase , )
class UpperCamelCase_ ( UpperCamelCase , UpperCamelCase ):
def __init__( self , lowercase ) -> str:
super().__init__(lowercase )
super()._init_backbone(lowercase )
_a : Optional[int] = [config.embedding_size] + config.hidden_sizes
_a : Any = ResNetEmbeddings(lowercase )
_a : List[str] = ResNetEncoder(lowercase )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase )
@replace_return_docstrings(output_type=lowercase , config_class=_CONFIG_FOR_DOC )
def snake_case__( self , lowercase , lowercase = None , lowercase = None ) -> BackboneOutput:
_a : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
_a : Tuple = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_a : List[Any] = self.embedder(lowercase )
_a : Tuple = self.encoder(lowercase , output_hidden_states=lowercase , return_dict=lowercase )
_a : str = outputs.hidden_states
_a : Tuple = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
_a : Dict = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=lowercase , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=lowercase , )
| 709 |
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__lowerCamelCase = logging.get_logger(__name__)
class UpperCamelCase_ ( UpperCamelCase ):
lowercase = ['''input_values''', '''padding_mask''']
def __init__( self , lowercase = 1 , lowercase = 24_000 , lowercase = 0.0 , lowercase = None , lowercase = None , **lowercase , ) -> Union[str, Any]:
super().__init__(feature_size=lowercase , sampling_rate=lowercase , padding_value=lowercase , **lowercase )
_a : int = chunk_length_s
_a : Any = overlap
@property
def snake_case__( self ) -> Optional[int]:
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def snake_case__( self ) -> Optional[int]:
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
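    # Worked example (added comment, assumed values): with chunk_length_s=1.0,
    # overlap=0.01 and sampling_rate=24_000, the two properties above resolve to
    # chunk_length = int(1.0 * 24_000) = 24_000 samples and
    # chunk_stride = max(1, int((1.0 - 0.01) * 24_000)) = 23_760 samples, i.e.
    # consecutive chunks share 240 samples.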
def __call__( self , lowercase , lowercase = None , lowercase = False , lowercase = None , lowercase = None , lowercase = None , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
F' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'
F' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
if padding and truncation:
raise ValueError('''Both padding and truncation were set. Make sure you only set one.''' )
elif padding is None:
# by default let's pad the inputs
_a : int = True
_a : Union[str, Any] = bool(
isinstance(lowercase , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) )
if is_batched:
_a : Any = [np.asarray(lowercase , dtype=np.floataa ).T for audio in raw_audio]
elif not is_batched and not isinstance(lowercase , np.ndarray ):
_a : int = np.asarray(lowercase , dtype=np.floataa )
elif isinstance(lowercase , np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
_a : Tuple = raw_audio.astype(np.floataa )
# always return batch
if not is_batched:
_a : str = [np.asarray(lowercase ).T]
# verify inputs are valid
for idx, example in enumerate(lowercase ):
if example.ndim > 2:
raise ValueError(F'Expected input shape (channels, length) but got shape {example.shape}' )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(F'Expected mono audio but example has {example.shape[-1]} channels' )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(F'Expected stereo audio but example has {example.shape[-1]} channels' )
_a : Any = None
_a : List[str] = BatchFeature({'''input_values''': raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
_a : Union[str, Any] = min(array.shape[0] for array in raw_audio )
_a : int = int(np.floor(max_length / self.chunk_stride ) )
_a : List[str] = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
_a : int = max(array.shape[0] for array in raw_audio )
_a : Any = int(np.ceil(max_length / self.chunk_stride ) )
_a : Any = (nb_step - 1) * self.chunk_stride + self.chunk_length
_a : Any = '''max_length'''
else:
_a : Tuple = input_values
# normal padding on batch
if padded_inputs is None:
_a : List[Any] = self.pad(
lowercase , max_length=lowercase , truncation=lowercase , padding=lowercase , return_attention_mask=lowercase , )
if padding:
_a : str = padded_inputs.pop('''attention_mask''' )
_a : List[str] = []
for example in padded_inputs.pop('''input_values''' ):
if self.feature_size == 1:
_a : str = example[..., None]
input_values.append(example.T )
_a : Union[str, Any] = input_values
if return_tensors is not None:
_a : Optional[Any] = padded_inputs.convert_to_tensors(lowercase )
return padded_inputs
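# A minimal usage sketch (added; the class is named `UpperCamelCase_` above, and the
# keyword names and 24 kHz mono input are assumptions, not part of the original file):
#
#   import numpy as np
#   extractor = UpperCamelCase_(feature_size=1, sampling_rate=24_000)
#   audio = np.zeros(24_000, dtype=np.float32)  # one second of silence
#   features = extractor(audio, sampling_rate=24_000, return_tensors="np")
#   # features["input_values"] is a batch of (channels, samples) arrays, transposed
#   # back from the internal (samples, channels) layout.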
| 307 | 0 |
"""simple docstring"""
import math
from numpy import inf
from scipy.integrate import quad
def gamma( num ):
    '''simple docstring'''
    if num <= 0:
        raise ValueError('math domain error' )
    return quad(integrand , 0 , inf , args=(num,) )[0]
def integrand( x , z ):
    '''simple docstring'''
    return math.pow(x , z - 1 ) * math.exp(-x )
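# Worked check (added comment): the integral above is the Gamma function
# Γ(z) = ∫_0^∞ x^(z-1) e^(-x) dx, so gamma(5) should return approximately
# 4! = 24.0 and gamma(0.5) approximately sqrt(pi) ≈ 1.7724.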
if __name__ == "__main__":
from doctest import testmod
testmod()
| 553 |
import math
from datetime import datetime, timedelta
def gauss_easter( year : int ):
__a : Union[str, Any] = year % 1_9
__a : int = year % 4
__a : Optional[int] = year % 7
__a : Dict = math.floor(year / 1_0_0 )
__a : Optional[Any] = math.floor((1_3 + 8 * leap_day_inhibits) / 2_5 )
__a : Union[str, Any] = leap_day_inhibits / 4
__a : str = (
1_5 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 3_0
__a : Union[str, Any] = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
__a : List[Any] = (1_9 * metonic_cycle + secular_moon_shift) % 3_0
# PHM -> Paschal Full Moon
__a : List[Any] = (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 2_9 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 1_9 )
    elif days_to_add == 2_8 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 1_8 )
    else:
        return datetime(year , 3 , 2_2 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
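# Note (added comment): this is Gauss's Easter algorithm. `year % 19` locates the
# year in the 19-year Metonic lunar cycle, the century terms correct for leap days
# dropped by the Gregorian calendar, and the two special cases for days_to_add of
# 29 and 28 implement the standard exceptions that cap the date of Easter.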
if __name__ == "__main__":
for year in (1994, 2000, 2010, 2021, 2023):
SCREAMING_SNAKE_CASE__ = '''will be''' if year > datetime.now().year else '''was'''
print(F"Easter in {year} {tense} {gauss_easter(year)}")
| 47 | 0 |
"""simple docstring"""
def sum_digits( num: int )-> int:
    """simple docstring"""
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum
def solution( max_n: int = 1_00 )-> int:
    """simple docstring"""
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2 , max_n + 1 ):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator )
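# Context (added comment): this mirrors Project Euler problem 65. The loop builds
# the numerators of the continued-fraction convergents of e, whose partial quotients
# are [2; 1, 2, 1, 1, 4, 1, 1, 6, ...] - hence `2 * i // 3` at every third term.
# As a check, solution(10) should return 17, since the 10th convergent is 1457/536
# and 1 + 4 + 5 + 7 = 17.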
if __name__ == "__main__":
print(F'''{solution() = }''')
| 556 |
"""simple docstring"""
def solution( n: int = 4_00_00_00 )-> int:
    """simple docstring"""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1] )
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib ) - 1 ):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
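# Context (added comment): this is Project Euler problem 2 - the sum of the
# even-valued Fibonacci terms not exceeding four million; solution() evaluates
# to 4613732.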
if __name__ == "__main__":
print(F'''{solution() = }''')
| 556 | 1 |
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class __lowerCamelCase :
"""simple docstring"""
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int = 13 , SCREAMING_SNAKE_CASE__ : int = 64 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 3 , SCREAMING_SNAKE_CASE__ : int = 3 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : int = 128 , SCREAMING_SNAKE_CASE__ : int=[16, 32, 64, 128] , SCREAMING_SNAKE_CASE__ : int = 7 , SCREAMING_SNAKE_CASE__ : int = 4 , SCREAMING_SNAKE_CASE__ : int = 37 , SCREAMING_SNAKE_CASE__ : str = "gelu" , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : int = 10 , SCREAMING_SNAKE_CASE__ : float = 0.02 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : int = 128 , SCREAMING_SNAKE_CASE__ : List[int] = [2, 2, 2, 2] , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 2 , ) -> Tuple:
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = image_size
lowerCAmelCase__ = patch_size
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = type_sequence_label_size
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = encoder_stride
lowerCAmelCase__ = num_attention_outputs
lowerCAmelCase__ = embed_dim
lowerCAmelCase__ = embed_dim + 1
lowerCAmelCase__ = resolution
lowerCAmelCase__ = depths
lowerCAmelCase__ = hidden_sizes
lowerCAmelCase__ = dim
lowerCAmelCase__ = mlp_expansion_ratio
def a ( self : int ) -> Optional[Any]:
lowerCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ = self.get_config()
return config, pixel_values, labels
def a ( self : int ) -> List[str]:
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def a ( self : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Any:
lowerCAmelCase__ = TFEfficientFormerModel(config=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a ( self : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> int:
lowerCAmelCase__ = self.type_sequence_label_size
lowerCAmelCase__ = TFEfficientFormerForImageClassification(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCAmelCase__ = 1
lowerCAmelCase__ = TFEfficientFormerForImageClassification(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a ( self : Dict ) -> Optional[Any]:
lowerCAmelCase__ = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs
lowerCAmelCase__ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
snake_case__ = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
snake_case__ = (
{
"feature-extraction": TFEfficientFormerModel,
"image-classification": (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
def a ( self : List[str] ) -> int:
lowerCAmelCase__ = TFEfficientFormerModelTester(self )
lowerCAmelCase__ = ConfigTester(
self , config_class=SCREAMING_SNAKE_CASE__ , has_text_modality=SCREAMING_SNAKE_CASE__ , hidden_size=37 )
def a ( self : Any ) -> Union[str, Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="EfficientFormer does not use inputs_embeds" )
def a ( self : str ) -> Dict:
pass
@unittest.skip(reason="EfficientFormer does not support input and output embeddings" )
def a ( self : Optional[Any] ) -> List[str]:
pass
def a ( self : Union[str, Any] ) -> Union[str, Any]:
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ = [*signature.parameters.keys()]
lowerCAmelCase__ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE__ )
def a ( self : List[str] ) -> Union[str, Any]:
def check_hidden_states_output(SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] ):
lowerCAmelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , training=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCAmelCase__ = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
if hasattr(self.model_tester , "encoder_seq_length" ):
lowerCAmelCase__ = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , "chunk_length" ) and self.model_tester.chunk_length > 1:
lowerCAmelCase__ = seq_length * self.model_tester.chunk_length
else:
lowerCAmelCase__ = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
lowerCAmelCase__ = outputs.decoder_hidden_states
                self.assertIsInstance(SCREAMING_SNAKE_CASE__ , (list, tuple) )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = getattr(self.model_tester , "seq_length" , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = getattr(self.model_tester , "decoder_seq_length" , SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int]=False ) -> Tuple:
lowerCAmelCase__ = super()._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def a ( self : List[Any] ) -> List[Any]:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
@unittest.skip(reason="EfficientFormer does not implement masked image modeling yet" )
def a ( self : List[str] ) -> Union[str, Any]:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*SCREAMING_SNAKE_CASE__ )
def a ( self : str ) -> Any:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE__ )
@slow
def a ( self : Optional[Any] ) -> str:
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = TFEfficientFormerModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def a ( self : Optional[int] ) -> Optional[Any]:
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ = True
lowerCAmelCase__ = getattr(self.model_tester , "seq_length" , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = getattr(self.model_tester , "encoder_seq_length" , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = getattr(self.model_tester , "key_length" , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = getattr(self.model_tester , "chunk_length" , SCREAMING_SNAKE_CASE__ )
if chunk_length is not None and hasattr(self.model_tester , "num_hashes" ):
lowerCAmelCase__ = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = True
lowerCAmelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , training=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCAmelCase__ = True
lowerCAmelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , training=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def a ( self : Union[str, Any] ) -> Any:
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
lowerCAmelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
lowerCAmelCase__ = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=SCREAMING_SNAKE_CASE__ )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.assertTrue(outputs_dict is not None )
def _A ( ):
"""simple docstring"""
lowerCAmelCase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def a ( self : Dict ) -> Dict:
return (
EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300" )
if is_vision_available()
else None
)
@slow
def a ( self : str ) -> Optional[Any]:
lowerCAmelCase__ = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300" )
lowerCAmelCase__ = self.default_image_processor
lowerCAmelCase__ = prepare_img()
lowerCAmelCase__ = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="tf" )
# forward pass
lowerCAmelCase__ = model(**SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
# verify the logits
lowerCAmelCase__ = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = tf.constant([-0.0_555, 0.4_825, -0.0_852] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
@slow
def a ( self : Tuple ) -> Union[str, Any]:
lowerCAmelCase__ = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
"snap-research/efficientformer-l1-300" )
lowerCAmelCase__ = self.default_image_processor
lowerCAmelCase__ = prepare_img()
lowerCAmelCase__ = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="tf" )
# forward pass
lowerCAmelCase__ = model(**SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
# verify the logits
lowerCAmelCase__ = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = tf.constant([-0.1_312, 0.4_353, -1.0_499] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
| 61 |
import operator as op
UpperCamelCase = 'scaler.pt'
UpperCamelCase = 'pytorch_model'
UpperCamelCase = 'random_states'
UpperCamelCase = 'optimizer'
UpperCamelCase = 'scheduler'
UpperCamelCase = 'pytorch_model.bin'
UpperCamelCase = 'pytorch_model.bin.index.json'
UpperCamelCase = 'model.safetensors'
UpperCamelCase = 'model.safetensors.index.json'
UpperCamelCase = '1.10.2'
UpperCamelCase = 'py38'
UpperCamelCase = '4.17.0'
UpperCamelCase = ['ml.p3.16xlarge', 'ml.p3dn.24xlarge', 'ml.p4dn.24xlarge']
UpperCamelCase = ['FULL_SHARD', 'SHARD_GRAD_OP', 'NO_SHARD', 'HYBRID_SHARD', 'HYBRID_SHARD_ZERO2']
UpperCamelCase = ['TRANSFORMER_BASED_WRAP', 'SIZE_BASED_WRAP', 'NO_WRAP']
UpperCamelCase = ['BACKWARD_PRE', 'BACKWARD_POST', 'NO_PREFETCH']
UpperCamelCase = ['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT']
UpperCamelCase = '2.0.1'
UpperCamelCase = ['pdsh', 'standard', 'openmpi', 'mvapich']
UpperCamelCase = ['default', 'reduce-overhead', 'max-autotune']
UpperCamelCase = {'>': op.gt, '>=': op.ge, '==': op.eq, '!=': op.ne, '<=': op.le, '<': op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
UpperCamelCase = [
'nnodes',
'nproc_per_node',
'rdzv_backend',
'rdzv_endpoint',
'rdzv_id',
'rdzv_conf',
'standalone',
'max_restarts',
'monitor_interval',
'start_method',
'role',
'module',
'm',
'no_python',
'run_path',
'log_dir',
'r',
'redirects',
't',
'tee',
'node_rank',
'master_addr',
'master_port',
]
UpperCamelCase = ['DEEPSPEED', 'MULTI_GPU', 'FSDP', 'MEGATRON_LM']
UpperCamelCase = ['DEEPSPEED', 'MULTI_XPU', 'FSDP']
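# Usage sketch (added comment; illustrative only): the comparison-operator map
# defined above lets a version requirement written as a string be evaluated
# programmatically, e.g.
#
#   import operator as op
#   compare = {">": op.gt, ">=": op.ge, "<": op.lt}[">="]
#   assert compare((2, 0, 1), (1, 10, 2))  # torch 2.0.1 satisfies ">= 1.10.2"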
| 61 | 1 |
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class lowercase__( unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int]=1_3 , SCREAMING_SNAKE_CASE_ : List[Any]=7 , SCREAMING_SNAKE_CASE_ : Optional[int]=True , SCREAMING_SNAKE_CASE_ : Union[str, Any]=True , SCREAMING_SNAKE_CASE_ : Union[str, Any]=True , SCREAMING_SNAKE_CASE_ : Optional[Any]=True , SCREAMING_SNAKE_CASE_ : List[Any]=9_9 , SCREAMING_SNAKE_CASE_ : Optional[Any]=3_2 , SCREAMING_SNAKE_CASE_ : List[str]=5 , SCREAMING_SNAKE_CASE_ : int=4 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=3_7 , SCREAMING_SNAKE_CASE_ : Any="gelu" , SCREAMING_SNAKE_CASE_ : Any=0.1 , SCREAMING_SNAKE_CASE_ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE_ : Dict=5_1_2 , SCREAMING_SNAKE_CASE_ : Tuple=1_6 , SCREAMING_SNAKE_CASE_ : Optional[Any]=2 , SCREAMING_SNAKE_CASE_ : str=0.02 , SCREAMING_SNAKE_CASE_ : Optional[int]=4 , ) -> int:
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = seq_length
lowercase_ = is_training
lowercase_ = use_attention_mask
lowercase_ = use_token_type_ids
lowercase_ = use_labels
lowercase_ = vocab_size
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = max_position_embeddings
lowercase_ = type_vocab_size
lowercase_ = type_sequence_label_size
lowercase_ = initializer_range
lowercase_ = num_choices
def _lowercase ( self : int ) -> List[str]:
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ = None
if self.use_attention_mask:
lowercase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase_ = None
if self.use_token_type_ids:
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase_ = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _lowercase ( self : Optional[Any] ) -> Any:
lowercase_ = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ , lowercase_ = config_and_inputs
lowercase_ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def _lowercase ( self : Tuple ) -> str:
lowercase_ = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ , lowercase_ = config_and_inputs
lowercase_ = True
lowercase_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class lowercase__( UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
a :List[str] = True
a :str = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _lowercase ( self : Dict ) -> int:
lowercase_ = FlaxBertModelTester(self )
@slow
def _lowercase ( self : Union[str, Any] ) -> Dict:
# Only check this for base model, not necessary for all model classes.
# This will also help speed-up tests.
lowercase_ = FlaxBertModel.from_pretrained('''bert-base-cased''' )
lowercase_ = model(np.ones((1, 1) ) )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
| 718 |
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class lowercase__( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Dict ) -> int:
lowercase_ = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(SCREAMING_SNAKE_CASE_ ) )
def _lowercase ( self : Optional[int] ) -> str:
lowercase_ = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(SCREAMING_SNAKE_CASE_ ) )
def _lowercase ( self : Tuple ) -> Tuple:
lowercase_ = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(SCREAMING_SNAKE_CASE_ ) )
def _lowercase ( self : Optional[Any] ) -> List[Any]:
lowercase_ = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(SCREAMING_SNAKE_CASE_ ) )
def _lowercase ( self : Optional[Any] ) -> List[str]:
lowercase_ = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
# Removed: 'text_encoder/model.safetensors',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertFalse(is_safetensors_compatible(SCREAMING_SNAKE_CASE_ ) )
def _lowercase ( self : Any ) -> Optional[Any]:
lowercase_ = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
lowercase_ = '''fp16'''
self.assertTrue(is_safetensors_compatible(SCREAMING_SNAKE_CASE_ , variant=SCREAMING_SNAKE_CASE_ ) )
def _lowercase ( self : List[Any] ) -> int:
lowercase_ = [
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
lowercase_ = '''fp16'''
self.assertTrue(is_safetensors_compatible(SCREAMING_SNAKE_CASE_ , variant=SCREAMING_SNAKE_CASE_ ) )
def _lowercase ( self : Any ) -> int:
# pass variant but use the non-variant filenames
lowercase_ = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
lowercase_ = '''fp16'''
self.assertTrue(is_safetensors_compatible(SCREAMING_SNAKE_CASE_ , variant=SCREAMING_SNAKE_CASE_ ) )
def _lowercase ( self : str ) -> List[str]:
lowercase_ = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowercase_ = '''fp16'''
self.assertFalse(is_safetensors_compatible(SCREAMING_SNAKE_CASE_ , variant=SCREAMING_SNAKE_CASE_ ) )
def _lowercase ( self : Union[str, Any] ) -> Tuple:
lowercase_ = [
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
]
lowercase_ = '''fp16'''
self.assertTrue(is_safetensors_compatible(SCREAMING_SNAKE_CASE_ , variant=SCREAMING_SNAKE_CASE_ ) )
def _lowercase ( self : Dict ) -> Any:
# pass variant but use the non-variant filenames
lowercase_ = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
lowercase_ = '''fp16'''
self.assertTrue(is_safetensors_compatible(SCREAMING_SNAKE_CASE_ , variant=SCREAMING_SNAKE_CASE_ ) )
def _lowercase ( self : Optional[int] ) -> Optional[Any]:
lowercase_ = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
# 'text_encoder/model.fp16.safetensors',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
lowercase_ = '''fp16'''
self.assertFalse(is_safetensors_compatible(SCREAMING_SNAKE_CASE_ , variant=SCREAMING_SNAKE_CASE_ ) )
| 409 | 0 |
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
_A = {
"""distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"""roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"""bert""": (BertConfig, BertForMaskedLM, BertTokenizer),
"""gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def __UpperCamelCase ( _A ):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def __UpperCamelCase ( _A , _A ):
if args.student_type == "roberta":
lowerCAmelCase_ = False
elif args.student_type == "gpt2":
lowerCAmelCase_ = False
def __UpperCamelCase ( _A , _A ):
if args.student_type == "roberta":
lowerCAmelCase_ = False
def __UpperCamelCase ( ):
lowerCAmelCase_ = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=__lowerCAmelCase , required=__lowerCAmelCase , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
'''--data_file''' , type=__lowerCAmelCase , required=__lowerCAmelCase , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=__lowerCAmelCase , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=__lowerCAmelCase , help='''The student type (DistilBERT, RoBERTa).''' , )
parser.add_argument('''--student_config''' , type=__lowerCAmelCase , required=__lowerCAmelCase , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=__lowerCAmelCase , type=__lowerCAmelCase , help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=__lowerCAmelCase , help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''' , type=__lowerCAmelCase , required=__lowerCAmelCase , help='''The teacher model.''' )
parser.add_argument('''--temperature''' , default=2.0 , type=__lowerCAmelCase , help='''Temperature for the softmax temperature.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=__lowerCAmelCase , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=__lowerCAmelCase , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=__lowerCAmelCase , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=__lowerCAmelCase , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=__lowerCAmelCase , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.1_5 , type=__lowerCAmelCase , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=__lowerCAmelCase , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=__lowerCAmelCase , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=__lowerCAmelCase , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=__lowerCAmelCase , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=__lowerCAmelCase , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
'''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=__lowerCAmelCase , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=__lowerCAmelCase , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=__lowerCAmelCase , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.0_5 , type=__lowerCAmelCase , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=__lowerCAmelCase , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5E-4 , type=__lowerCAmelCase , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=__lowerCAmelCase , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=__lowerCAmelCase , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.0_2 , type=__lowerCAmelCase , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=__lowerCAmelCase , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=__lowerCAmelCase , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=__lowerCAmelCase , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=__lowerCAmelCase , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=__lowerCAmelCase , default=500 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=__lowerCAmelCase , default=4000 , help='''Checkpoint interval.''' )
lowerCAmelCase_ = parser.parse_args()
sanity_checks(__lowerCAmelCase )
# ARGS #
init_gpu_params(__lowerCAmelCase )
set_seed(__lowerCAmelCase )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite"
                    ''' it. Use `--force` if you want to overwrite it.''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f"Experiment will be dumped and logged in {args.dump_path}" )
# SAVE PARAMS #
logger.info(f"Param: {args}" )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(__lowerCAmelCase ) , __lowerCAmelCase , indent=4 )
git_log(args.dump_path )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = MODEL_CLASSES[args.student_type]
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
lowerCAmelCase_ = teacher_tokenizer_class.from_pretrained(args.teacher_name )
lowerCAmelCase_ = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
lowerCAmelCase_ = tokenizer.all_special_tokens.index(__lowerCAmelCase )
lowerCAmelCase_ = tokenizer.all_special_ids[idx]
logger.info(f"Special tokens {special_tok_ids}" )
lowerCAmelCase_ = special_tok_ids
lowerCAmelCase_ = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f"Loading data from {args.data_file}" )
with open(args.data_file , '''rb''' ) as fp:
lowerCAmelCase_ = pickle.load(__lowerCAmelCase )
if args.mlm:
logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)" )
with open(args.token_counts , '''rb''' ) as fp:
lowerCAmelCase_ = pickle.load(__lowerCAmelCase )
lowerCAmelCase_ = np.maximum(__lowerCAmelCase , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
lowerCAmelCase_ = 0.0 # do not predict special tokens
lowerCAmelCase_ = torch.from_numpy(__lowerCAmelCase )
else:
lowerCAmelCase_ = None
lowerCAmelCase_ = LmSeqsDataset(params=__lowerCAmelCase , data=__lowerCAmelCase )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(f"Loading student config from {args.student_config}" )
lowerCAmelCase_ = student_config_class.from_pretrained(args.student_config )
lowerCAmelCase_ = True
if args.student_pretrained_weights is not None:
logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}" )
lowerCAmelCase_ = student_model_class.from_pretrained(args.student_pretrained_weights , config=__lowerCAmelCase )
else:
lowerCAmelCase_ = student_model_class(__lowerCAmelCase )
if args.n_gpu > 0:
student.to(f"cuda:{args.local_rank}" )
logger.info('''Student loaded.''' )
# TEACHER #
lowerCAmelCase_ = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=__lowerCAmelCase )
if args.n_gpu > 0:
teacher.to(f"cuda:{args.local_rank}" )
logger.info(f"Teacher loaded from {args.teacher_name}." )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(__lowerCAmelCase , __lowerCAmelCase )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(__lowerCAmelCase , __lowerCAmelCase )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
lowerCAmelCase_ = Distiller(
params=__lowerCAmelCase , dataset=__lowerCAmelCase , token_probs=__lowerCAmelCase , student=__lowerCAmelCase , teacher=__lowerCAmelCase )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
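

# ------------------------------------------------------------------------
# Illustration (not part of the original script): how the MLM smoothing
# weights above behave. Rare tokens get a larger masking probability via
# counts ** -smoothing, and special tokens are zeroed out so they are never
# selected. The counts and special-token ids below are made up.
import numpy as np
import torch

_counts = np.array([1000, 10, 200, 5, 1000])          # hypothetical per-token counts
_special_tok_ids = {"pad_token": 0, "mask_token": 4}  # hypothetical ids
_token_probs = np.maximum(_counts, 1) ** -0.7         # mlm_smoothing = 0.7
for _idx in _special_tok_ids.values():
    _token_probs[_idx] = 0.0  # never pick special tokens for masking
_token_probs = torch.from_numpy(_token_probs)
# rare ids (1 and 3) now dominate the masking distribution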
| 431 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __magic_name__ (snake_case_ ,snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : str = IFImgaImgSuperResolutionPipeline
__lowercase : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'width', 'height'}
__lowercase : int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'original_image'} )
__lowercase : List[str] = PipelineTesterMixin.required_optional_params - {'latents'}
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
return self._get_superresolution_dummy_components()
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:int , _a:Optional[Any]=0 ):
if str(_a ).startswith('''mps''' ):
snake_case__ = torch.manual_seed(_a )
else:
snake_case__ = torch.Generator(device=_a ).manual_seed(_a )
snake_case__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_a ) ).to(_a )
snake_case__ = floats_tensor((1, 3, 16, 16) , rng=random.Random(_a ) ).to(_a )
snake_case__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def SCREAMING_SNAKE_CASE__ ( self:str ):
self._test_save_load_local()
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 33 | 0 |
'''simple docstring'''
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"""The RoBERTa Model transformer with early exiting (DeeRoBERTa). """ , _UpperCamelCase , )
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = RobertaConfig
SCREAMING_SNAKE_CASE_ : Optional[Any] = """roberta"""
def __init__( self : List[str] , __lowerCamelCase : Union[str, Any] ) -> Tuple:
super().__init__(__lowerCamelCase )
a = RobertaEmbeddings(__lowerCamelCase )
self.init_weights()
@add_start_docstrings(
"""RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. """ , _UpperCamelCase , )
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = RobertaConfig
SCREAMING_SNAKE_CASE_ : int = """roberta"""
def __init__( self : List[str] , __lowerCamelCase : Tuple ) -> Optional[int]:
super().__init__(__lowerCamelCase )
a = config.num_labels
a = config.num_hidden_layers
a = DeeRobertaModel(__lowerCamelCase )
a = nn.Dropout(config.hidden_dropout_prob )
a = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(__lowerCamelCase )
def __UpperCAmelCase ( self : Any , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : int=None , __lowerCamelCase : Any=None , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : int=-1 , __lowerCamelCase : Tuple=False , ) -> str:
a = self.num_layers
try:
a = self.roberta(
__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , position_ids=__lowerCamelCase , head_mask=__lowerCamelCase , inputs_embeds=__lowerCamelCase , )
a = outputs[1]
a = self.dropout(__lowerCamelCase )
a = self.classifier(__lowerCamelCase )
a = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
a = e.message
a = e.exit_layer
a = outputs[0]
if not self.training:
a = entropy(__lowerCamelCase )
a = []
a = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
a = MSELoss()
a = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
a = CrossEntropyLoss()
a = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
a = []
for highway_exit in outputs[-1]:
a = highway_exit[0]
if not self.training:
highway_logits_all.append(__lowerCamelCase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
a = MSELoss()
a = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
a = CrossEntropyLoss()
a = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(__lowerCamelCase )
if train_highway:
a = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
a = (loss,) + outputs
if not self.training:
a = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
a = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
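

# Illustration (not from the original file): the entropy gate that drives
# early exiting. If a highway head's output distribution is confident enough
# (low entropy), inference can stop at that layer. The threshold below is a
# made-up example value.
import torch


def _entropy_demo(logits):
    probs = torch.softmax(logits, dim=-1)
    return -(probs * torch.log(probs + 1e-12)).sum(dim=-1)


_logits = torch.tensor([[4.0, -2.0, -2.0]])  # hypothetical highway logits
if _entropy_demo(_logits).item() < 0.3:      # assumed exit threshold
    print("confident: exit at this highway")
else:
    print("uncertain: continue to deeper layers")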
| 715 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowerCAmelCase : Any = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[Any] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass  # no fast tokenizer is defined for RoCBert
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
__lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
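
# Illustration (not from the original module): the optional-dependency guard
# pattern above, reduced to its essentials. `my_optional_pkg` is a
# hypothetical module name.
try:
    import my_optional_pkg  # noqa: F401

    _pkg_available = True
except ImportError:
    _pkg_available = False

_demo_import_structure = {"configuration": ["MyConfig"]}
if _pkg_available:
    _demo_import_structure["modeling"] = ["MyModel"]  # heavy symbols only when available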
| 662 | 0 |
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger("""transformers.models.speecht5""")
def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechTaHifiGanConfig()

    model = SpeechTaHifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
lowercase_ = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
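
# Illustration (not from the original script): why the converter brackets the
# copy with apply_weight_norm()/remove_weight_norm(). A weight-normalized
# layer stores weight_g (magnitude) and weight_v (direction) instead of a
# single weight tensor; removing the norm folds them back into .weight.
from torch import nn
from torch.nn.utils import remove_weight_norm, weight_norm

_conv = weight_norm(nn.Conv1d(80, 512, kernel_size=7))
print(hasattr(_conv, "weight_g"), hasattr(_conv, "weight_v"))  # True True
with torch.no_grad():
    _conv.weight_g.copy_(torch.ones_like(_conv.weight_g))  # stand-in for checkpoint data
remove_weight_norm(_conv)
print(hasattr(_conv, "weight_g"))  # False: folded into a plain .weight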
| 74 |
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
            '''stabilityai/stable-diffusion-2''' , revision='''bf16''' , dtype=jnp.bfloat16 , )
__SCREAMING_SNAKE_CASE : Optional[Any] = '''A painting of a squirrel eating a burger'''
__SCREAMING_SNAKE_CASE : int = jax.device_count()
__SCREAMING_SNAKE_CASE : Tuple = num_samples * [prompt]
__SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.prepare_inputs(_A )
__SCREAMING_SNAKE_CASE : Tuple = replicate(_A )
__SCREAMING_SNAKE_CASE : Optional[int] = shard(_A )
__SCREAMING_SNAKE_CASE : Dict = jax.random.PRNGKey(0 )
__SCREAMING_SNAKE_CASE : Optional[int] = jax.random.split(_A , jax.device_count() )
__SCREAMING_SNAKE_CASE : str = sd_pipe(_A , _A , _A , num_inference_steps=25 , jit=_A )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
__SCREAMING_SNAKE_CASE : List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__SCREAMING_SNAKE_CASE : Union[str, Any] = images[0, 253:256, 253:256, -1]
__SCREAMING_SNAKE_CASE : Dict = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__SCREAMING_SNAKE_CASE : Tuple = jnp.array([0.42_38, 0.44_14, 0.43_95, 0.44_53, 0.46_29, 0.45_90, 0.45_31, 0.4_55_08, 0.45_12] )
print(F'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = '''stabilityai/stable-diffusion-2'''
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = FlaxDPMSolverMultistepScheduler.from_pretrained(_A , subfolder='''scheduler''' )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = FlaxStableDiffusionPipeline.from_pretrained(
            _A , scheduler=_A , revision='''bf16''' , dtype=jnp.bfloat16 , )
__SCREAMING_SNAKE_CASE : List[str] = scheduler_params
__SCREAMING_SNAKE_CASE : Tuple = '''A painting of a squirrel eating a burger'''
__SCREAMING_SNAKE_CASE : List[Any] = jax.device_count()
__SCREAMING_SNAKE_CASE : Tuple = num_samples * [prompt]
__SCREAMING_SNAKE_CASE : Any = sd_pipe.prepare_inputs(_A )
__SCREAMING_SNAKE_CASE : Optional[int] = replicate(_A )
__SCREAMING_SNAKE_CASE : List[str] = shard(_A )
__SCREAMING_SNAKE_CASE : int = jax.random.PRNGKey(0 )
__SCREAMING_SNAKE_CASE : Union[str, Any] = jax.random.split(_A , jax.device_count() )
__SCREAMING_SNAKE_CASE : List[Any] = sd_pipe(_A , _A , _A , num_inference_steps=25 , jit=_A )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
__SCREAMING_SNAKE_CASE : Tuple = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__SCREAMING_SNAKE_CASE : Dict = images[0, 253:256, 253:256, -1]
__SCREAMING_SNAKE_CASE : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.array([0.43_36, 0.4_29_69, 0.44_53, 0.41_99, 0.42_97, 0.45_31, 0.44_34, 0.44_34, 0.42_97] )
print(F'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
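
# Illustration (not from the original tests): the replicate/shard data-parallel
# pattern used above, with a toy pmapped function in place of the pipeline.
def _pmap_demo():
    params = {"w": jnp.ones((3,))}
    replicated = replicate(params)          # copy params to every device
    batch = jnp.ones((jax.device_count() * 2, 3))
    sharded = shard(batch)                  # split the batch across devices

    @jax.pmap
    def apply(p, x):
        return x * p["w"]

    return apply(replicated, sharded).shape  # (num_devices, 2, 3)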
| 74 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class __lowercase :
'''simple docstring'''
__lowerCAmelCase = 42
__lowerCAmelCase = 42
class __lowercase :
'''simple docstring'''
def __init__( self , _UpperCAmelCase ):
__a : list[list[Edge]] = [[] for _ in range(_UpperCAmelCase )]
__a : Dict = size
def __getitem__( self , _UpperCAmelCase ):
return iter(self._graph[vertex] )
@property
def _lowerCamelCase ( self ):
return self._size
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
if weight not in (0, 1):
raise ValueError('''Edge weight must be either 0 or 1.''' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('''Vertex indexes must be in [0; size).''' )
self._graph[from_vertex].append(Edge(_UpperCAmelCase , _UpperCAmelCase ) )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ):
__a : List[str] = deque([start_vertex] )
__a : list[int | None] = [None] * self.size
__a : int = 0
while queue:
__a : Any = queue.popleft()
__a : Tuple = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
__a : List[str] = current_distance + edge.weight
__a : str = distances[edge.destination_vertex]
if (
isinstance(_UpperCAmelCase , _UpperCAmelCase )
and new_distance >= dest_vertex_distance
):
continue
__a : str = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('''No path from start_vertex to finish_vertex.''' )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
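
# Example usage (illustrative): a small 0/1-weighted graph.
_g = AdjacencyList(4)
_g.add_edge(0, 1, 0)
_g.add_edge(1, 2, 1)
_g.add_edge(0, 3, 1)
_g.add_edge(3, 2, 0)
print(_g.get_shortest_path(0, 2))  # 1, e.g. 0 -> 1 (w=0) -> 2 (w=1)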
| 712 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), '''src'''))
sys.path.insert(1, git_repo_path)
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('''--make-reports''')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 101 | 0 |
'''Divide-and-conquer closest pair of points, O(n log n).'''


def euclidean_distance_sqr(point1, point2):
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda x: x[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    # brute force over all pairs; used only for small inputs
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    # inside the strip, only a constant number of neighbours need checking
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid
    )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[mid:], points_counts - mid
    )
    closest_pair_dis = min(closest_in_left, closest_in_right)

    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis
    )
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x, points_sorted_on_y, points_counts
        )
    ) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print('Distance:', closest_pair_of_points(points, len(points)))
| 44 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
_snake_case : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class a (_lowerCAmelCase ):
"""simple docstring"""
def __init__( self : List[Any] , lowerCamelCase : CLIPSegForImageSegmentation , lowerCamelCase : CLIPSegProcessor , lowerCamelCase : AutoencoderKL , lowerCamelCase : CLIPTextModel , lowerCamelCase : CLIPTokenizer , lowerCamelCase : UNetaDConditionModel , lowerCamelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowerCamelCase : StableDiffusionSafetyChecker , lowerCamelCase : CLIPImageProcessor , ) -> Tuple:
super().__init__()
if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
__snake_case : Tuple = (
F'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'
F' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1" , "1.0.0" , lowerCamelCase , standard_warn=lowerCamelCase )
__snake_case : Any = dict(scheduler.config )
__snake_case : List[Any] = 1
__snake_case : Tuple = FrozenDict(lowerCamelCase )
if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
__snake_case : List[str] = (
F'The configuration file of this scheduler: {scheduler} has not set the configuration'
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set" , "1.0.0" , lowerCamelCase , standard_warn=lowerCamelCase )
__snake_case : List[str] = dict(scheduler.config )
__snake_case : List[str] = True
__snake_case : Any = FrozenDict(lowerCamelCase )
if safety_checker is None:
logger.warning(
F'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
segmentation_model=lowerCamelCase , segmentation_processor=lowerCamelCase , vae=lowerCamelCase , text_encoder=lowerCamelCase , tokenizer=lowerCamelCase , unet=lowerCamelCase , scheduler=lowerCamelCase , safety_checker=lowerCamelCase , feature_extractor=lowerCamelCase , )
def __snake_case ( self : Dict , lowerCamelCase : Optional[Union[str, int]] = "auto" ) -> Tuple:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__snake_case : Any = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCamelCase )
def __snake_case ( self : List[Any] ) -> Any:
self.enable_attention_slicing(lowerCamelCase )
def __snake_case ( self : Optional[Any] ) -> str:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
__snake_case : Optional[int] = torch.device("cuda" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(lowerCamelCase , lowerCamelCase )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __snake_case ( self : int ) -> Any:
if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowerCamelCase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self : List[Any] , lowerCamelCase : Union[str, List[str]] , lowerCamelCase : Union[torch.FloatTensor, PIL.Image.Image] , lowerCamelCase : str , lowerCamelCase : int = 512 , lowerCamelCase : int = 512 , lowerCamelCase : int = 50 , lowerCamelCase : float = 7.5 , lowerCamelCase : Optional[Union[str, List[str]]] = None , lowerCamelCase : Optional[int] = 1 , lowerCamelCase : float = 0.0 , lowerCamelCase : Optional[torch.Generator] = None , lowerCamelCase : Optional[torch.FloatTensor] = None , lowerCamelCase : Optional[str] = "pil" , lowerCamelCase : bool = True , lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase : int = 1 , **lowerCamelCase : Dict , ) -> List[str]:
__snake_case : Tuple = self.segmentation_processor(
text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
__snake_case : str = self.segmentation_model(**lowerCamelCase )
__snake_case : Dict = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
__snake_case : List[Any] = self.numpy_to_pil(lowerCamelCase )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
__snake_case : Tuple = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=lowerCamelCase , image=lowerCamelCase , mask_image=lowerCamelCase , height=lowerCamelCase , width=lowerCamelCase , num_inference_steps=lowerCamelCase , guidance_scale=lowerCamelCase , negative_prompt=lowerCamelCase , num_images_per_prompt=lowerCamelCase , eta=lowerCamelCase , generator=lowerCamelCase , latents=lowerCamelCase , output_type=lowerCamelCase , return_dict=lowerCamelCase , callback=lowerCamelCase , callback_steps=lowerCamelCase , )
| 81 | 0 |
'''simple docstring'''
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def A__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(UpperCAmelCase ):
lowercase : List[str] =AutoConfig.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
lowercase : Dict =FlaxAutoModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
@slow
def A__ ( self : List[str] ) -> Dict:
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(UpperCAmelCase ):
lowercase : int =AutoConfig.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
lowercase : Optional[int] =FlaxAutoModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
@slow
def A__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
lowercase : List[str] =AutoTokenizer.from_pretrained(UpperCAmelCase )
lowercase : List[str] =FlaxBertModel.from_pretrained(UpperCAmelCase )
lowercase : Dict =tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**UpperCAmelCase : Optional[int] ):
return model(**UpperCAmelCase )
eval(**UpperCAmelCase ).block_until_ready()
@slow
def A__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
lowercase : Optional[Any] =AutoTokenizer.from_pretrained(UpperCAmelCase )
lowercase : Tuple =FlaxRobertaModel.from_pretrained(UpperCAmelCase )
lowercase : Tuple =tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**UpperCAmelCase : Optional[int] ):
return model(**UpperCAmelCase )
eval(**UpperCAmelCase ).block_until_ready()
def A__ ( self : str ) -> Dict:
'''simple docstring'''
with self.assertRaisesRegex(
UpperCAmelCase , '''bert-base is not a local folder and is not a valid model identifier''' ):
lowercase : Optional[int] =FlaxAutoModel.from_pretrained('''bert-base''' )
def A__ ( self : Any ) -> Any:
'''simple docstring'''
with self.assertRaisesRegex(
UpperCAmelCase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
lowercase : Optional[int] =FlaxAutoModel.from_pretrained(UpperCAmelCase , revision='''aaaaaa''' )
def A__ ( self : Dict ) -> Any:
'''simple docstring'''
with self.assertRaisesRegex(
UpperCAmelCase , '''hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack''' , ):
lowercase : Union[str, Any] =FlaxAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' )
def A__ ( self : Dict ) -> Any:
'''simple docstring'''
with self.assertRaisesRegex(UpperCAmelCase , '''Use `from_pt=True` to load this model''' ):
lowercase : str =FlaxAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )
| 8 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
SCREAMING_SNAKE_CASE = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
SCREAMING_SNAKE_CASE = {
'allenai/led-base-16384': 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """Map each utf-8 byte to a printable unicode character for byte-level BPE."""
    bs = (
        list(range(ord('''!''' ), ord('''~''' ) + 1 )) + list(range(ord('''¡''' ), ord('''¬''' ) + 1 )) + list(range(ord('''®''' ), ord('''ÿ''' ) + 1 ))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs, cs ) )
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : str="replace" , UpperCAmelCase : int="<s>" , UpperCAmelCase : Optional[int]="</s>" , UpperCAmelCase : Optional[int]="</s>" , UpperCAmelCase : List[Any]="<s>" , UpperCAmelCase : str="<unk>" , UpperCAmelCase : Dict="<pad>" , UpperCAmelCase : Union[str, Any]="<mask>" , UpperCAmelCase : str=False , **UpperCAmelCase : int , ) -> Dict:
'''simple docstring'''
lowercase : int =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else bos_token
lowercase : Union[str, Any] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else eos_token
lowercase : str =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else sep_token
lowercase : Optional[int] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else cls_token
lowercase : Union[str, Any] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else unk_token
lowercase : List[Any] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowercase : Any =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else mask_token
super().__init__(
errors=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , **UpperCAmelCase , )
with open(UpperCAmelCase , encoding='''utf-8''' ) as vocab_handle:
lowercase : str =json.load(UpperCAmelCase )
lowercase : Optional[int] ={v: k for k, v in self.encoder.items()}
lowercase : Optional[int] =errors # how to handle errors in decoding
lowercase : Tuple =bytes_to_unicode()
lowercase : int ={v: k for k, v in self.byte_encoder.items()}
with open(UpperCAmelCase , encoding='''utf-8''' ) as merges_handle:
lowercase : Union[str, Any] =merges_handle.read().split('''\n''' )[1:-1]
lowercase : Optional[Any] =[tuple(merge.split() ) for merge in bpe_merges]
lowercase : Optional[int] =dict(zip(UpperCAmelCase , range(len(UpperCAmelCase ) ) ) )
lowercase : Optional[int] ={}
lowercase : Any =add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowercase : str =re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def A__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
return len(self.encoder )
def A__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def A__ ( self : int , UpperCAmelCase : str ) -> Optional[Any]:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowercase : List[str] =tuple(UpperCAmelCase )
lowercase : List[str] =get_pairs(UpperCAmelCase )
if not pairs:
return token
while True:
lowercase : Tuple =min(UpperCAmelCase , key=lambda UpperCAmelCase : self.bpe_ranks.get(UpperCAmelCase , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowercase , lowercase : Optional[int] =bigram
lowercase : Union[str, Any] =[]
lowercase : Optional[Any] =0
while i < len(UpperCAmelCase ):
try:
lowercase : Dict =word.index(UpperCAmelCase , UpperCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowercase : Optional[int] =j
if word[i] == first and i < len(UpperCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase : List[str] =tuple(UpperCAmelCase )
lowercase : str =new_word
if len(UpperCAmelCase ) == 1:
break
else:
lowercase : Optional[Any] =get_pairs(UpperCAmelCase )
lowercase : Optional[Any] =''' '''.join(UpperCAmelCase )
lowercase : Union[str, Any] =word
return word
def A__ ( self : int , UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Dict =[]
for token in re.findall(self.pat , UpperCAmelCase ):
lowercase : Optional[int] =''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCAmelCase ).split(''' ''' ) )
return bpe_tokens
def A__ ( self : Union[str, Any] , UpperCAmelCase : Union[str, Any] ) -> List[str]:
'''simple docstring'''
return self.encoder.get(UpperCAmelCase , self.encoder.get(self.unk_token ) )
def A__ ( self : Dict , UpperCAmelCase : Optional[int] ) -> Any:
'''simple docstring'''
return self.decoder.get(UpperCAmelCase )
def A__ ( self : List[str] , UpperCAmelCase : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowercase : str =''''''.join(UpperCAmelCase )
lowercase : Dict =bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def A__ ( self : Any , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(UpperCAmelCase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowercase : Optional[Any] =os.path.join(
UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase : List[Any] =os.path.join(
UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCAmelCase , ensure_ascii=UpperCAmelCase ) + '''\n''' )
lowercase : List[str] =0
with open(UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
''' Please check that the tokenizer is not corrupted!''' )
lowercase : Any =token_index
writer.write(''' '''.join(UpperCAmelCase ) + '''\n''' )
index += 1
return vocab_file, merge_file
def A__ ( self : str , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase : Optional[int] =[self.cls_token_id]
lowercase : List[Any] =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A__ ( self : Optional[int] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None , UpperCAmelCase : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase )) + [1]
return [1] + ([0] * len(UpperCAmelCase )) + [1, 1] + ([0] * len(UpperCAmelCase )) + [1]
def A__ ( self : Any , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowercase : Dict =[self.sep_token_id]
lowercase : Optional[int] =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A__ ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int]=False , **UpperCAmelCase : Optional[Any] ) -> str:
'''simple docstring'''
lowercase : Tuple =kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(UpperCAmelCase ) > 0 and not text[0].isspace()):
lowercase : Union[str, Any] =''' ''' + text
return (text, kwargs)
def A__ ( self : Any , UpperCAmelCase : Union[Dict[str, EncodedInput], BatchEncoding] , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[bool] = None , ) -> dict:
'''simple docstring'''
lowercase : Optional[int] =super()._pad(
encoded_inputs=UpperCAmelCase , max_length=UpperCAmelCase , padding_strategy=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , )
# Load from model defaults
if return_attention_mask is None:
lowercase : Tuple ='''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowercase : Optional[Any] =encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
lowercase : str =len(encoded_inputs['''global_attention_mask'''] ) != len(UpperCAmelCase )
if needs_to_be_padded:
lowercase : Tuple =len(UpperCAmelCase ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowercase : List[str] =(
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
lowercase : Any =[-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
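

# Illustration (not from the original tokenizer): the BPE merge loop in `bpe`
# above, on a toy merge table. The merges below are made up.
def _toy_bpe(token, bpe_ranks):
    word = tuple(token)
    while len(word) > 1:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        bigram = min(pairs, key=lambda p: bpe_ranks.get(p, float("inf")))
        if bigram not in bpe_ranks:
            break
        first, second = bigram
        new_word, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == bigram:
                new_word.append(first + second)  # merge the highest-ranked pair
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        word = tuple(new_word)
    return " ".join(word)


# _toy_bpe("lower", {("l", "o"): 0, ("lo", "w"): 1, ("e", "r"): 2}) -> "low er"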
| 8 | 1 |
from __future__ import annotations

from dataclasses import dataclass


@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(root: TreeNode | None) -> bool:
    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True
        if not isinstance(node, TreeNode):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(root):
        raise ValueError(
            'Each node should be type of TreeNode and data should be float.')

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(
                node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(root, -float('inf'), float('inf'))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
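
# Example usage (illustrative): a valid BST vs. one violating the ordering.
_valid = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
_invalid = TreeNode(2.0, TreeNode(3.0), TreeNode(1.0))
print(is_binary_search_tree(_valid))    # True
print(is_binary_search_tree(_invalid))  # False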
| 164 |
def is_pentagonal(n: int) -> bool:
    '''A number n is pentagonal if (1 + sqrt(1 + 24 * n)) / 6 is an integer.'''
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5_000) -> int:
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1


if __name__ == "__main__":
    print(F'''{solution() = }''')
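
# Why the integrality test in is_pentagonal works (standard algebra, not from
# the original file):
#   P = n(3n - 1) / 2  =>  3n^2 - n - 2P = 0  =>  n = (1 + sqrt(1 + 24P)) / 6,
# so P is pentagonal exactly when that n is a positive integer.
assert is_pentagonal(5) and is_pentagonal(12) and not is_pentagonal(6)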
| 164 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
__UpperCAmelCase = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['SpeechT5Tokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 503 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
__UpperCAmelCase = {
'configuration_ernie': ['ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ErnieConfig', 'ErnieOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ErnieForCausalLM',
'ErnieForMaskedLM',
'ErnieForMultipleChoice',
'ErnieForNextSentencePrediction',
'ErnieForPreTraining',
'ErnieForQuestionAnswering',
'ErnieForSequenceClassification',
'ErnieForTokenClassification',
'ErnieModel',
'ErniePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 503 | 1 |
'''simple docstring'''
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = """src/transformers"""
PATH_TO_TASK_GUIDES = """docs/source/en/tasks"""
def _find_text_in_file(filename, start_prompt, end_prompt):
    '''Return the text in `filename` between `start_prompt` and `end_prompt`, plus its line span.'''
    with open(filename, '''r''', encoding='''utf-8''', newline='''\n''') as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    # Find the end prompt, then trim surrounding blank lines.
    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
"""asr.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"""audio_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"""language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"""image_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"""masked_language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"""multiple_choice.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"""object_detection.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"""question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"""semantic_segmentation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"""sequence_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"""summarization.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""token_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"""translation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""video_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"""document_question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"""monocular_depth_estimation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
"""summarization.md""": ("""nllb""",),
"""translation.md""": ("""nllb""",),
}
def get_model_list_for_task(task_guide):
    '''Return the Markdown list of model links supported by a given task guide.'''
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"""[{name}](../model_doc/{code})""" for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    '''Compare the model list in a task guide with the auto-generated one; optionally fix it.'''
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide), start_prompt='''<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->''', end_prompt='''<!--End of the generated tip-->''', )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), '''w''', encoding='''utf-8''', newline='''\n''') as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"""
                ''' to fix this.''')
if __name__ == "__main__":
_lowercase : str = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
_lowercase : str = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 210 |
def solution(length: int = 50) -> int:
    """Count the ways a row of `length` unit squares can be filled with unit
    squares and oblong tiles of length two, three, or four, where each tile may
    start at any offset within the remaining row."""
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):  # tiles of length 2, 3 and 4
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
| 210 | 1 |
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
lowerCAmelCase_ = """http://www.mocksite.com/file1.txt"""
lowerCAmelCase_ = """\"text\": [\"foo\", \"foo\"]"""
lowerCAmelCase_ = """6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"""
class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        # Serve the whole mock payload in a single chunk, as requests would.
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    return MockResponse()


@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager_download(urls_type, tmp_path, monkeypatch):
    import requests

    monkeypatch.setattr(requests, "request", mock_request)

    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}
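

# A small companion check, added here as a sketch: the cached filename asserted
# above (`parts[-1] == HASH`) should be exactly what `hash_url_to_filename`
# computes for the URL, with no etag recorded since the config sets
# `use_etag=False`.
def test_hash_url_to_filename_sketch():
    assert hash_url_to_filename(URL, etag=None) == HASH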
@pytest.mark.parametrize("paths_type", [str, list, dict])
def test_download_manager_extract(paths_type, xz_file, text_file):
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir,
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content


def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4


@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2


@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2


# `data_dir_with_hidden_files` is assumed to be a fixture providing a directory
# with test.txt and train.txt plus hidden files that iter_files should skip.
def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
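

# A standalone sketch of the iter_archive pattern exercised above; the archive
# path is a made-up placeholder, not one of this suite's fixtures:
#
#     dl_manager = DownloadManager()
#     for path, file in dl_manager.iter_archive("data/archive.tar"):
#         print(path, file.read(16))  # member name and its first bytes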
| 714 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}


class ASTConfig(PretrainedConfig):
    """Configuration class for the Audio Spectrogram Transformer (AST) model."""

    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
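
# A minimal usage sketch for the configuration above. ASTConfig/ASTModel are
# assumed to be the names exported by transformers for this model; kept as a
# comment because this file uses package-relative imports and is not meant to
# run standalone:
#
#     from transformers import ASTConfig, ASTModel
#
#     config = ASTConfig(num_mel_bins=128, max_length=1024)
#     model = ASTModel(config)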
| 669 | 0 |