| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (length 82 to 53.2k) | int64 (0 to 721) | string (length 91 to 41.9k) | int64 (0 to 699) | int64 (0 or 1) |
from __future__ import annotations

from random import random
from typing import Generic, TypeVar

KT = TypeVar("KT")
VT = TypeVar("VT")


class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        """Number of forward references from this node."""
        return len(self.forward)


class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level

    def __str__(self) -> str:
        """Return a visual representation of the skip list."""
        items = list(self)

        if len(items) == 0:
            return f"SkipList(level={self.level})"

        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4

        node = self.head
        lines = []

        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(node.forward))
        lines.append(" " * label_size + "| " * len(node.forward))

        while len(node.forward) != 0:
            node = node.forward[0]

            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
            )
            lines.append(" " * label_size + "| " * len(node.forward))
            forwards = node.forward

        lines.append("None".ljust(label_size) + "* " * len(node.forward))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)

    def __iter__(self):
        node = self.head

        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        """Generate a random level for a new node, geometrically distributed with parameter p."""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level

    def _locate_node(self, key) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        """
        Return (node with the given key or None, update vector).

        The update vector holds, per level, the rightmost node that comes
        before the searched key; these are the nodes whose references may
        need to change on insert or delete.
        """
        update_vector = []
        node = self.head

        for i in reversed(range(self.level)):
            # i < node.level - When the node level is lesser than `i`, decrement `i`.
            # node.forward[i].key < key - Jumping to a node with a key value higher
            #                             than or equal to the searched key would
            #                             result in skipping the searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to the searched node) will potentially
            # have to be updated.
            update_vector.append(node)

        update_vector.reverse()  # Note that we were inserting values in reverse order.

        # len(node.forward) != 0 - If the current node doesn't contain any further
        #                          references then the searched key is not present.
        # node.forward[0].key == key - The next node key should be equal to the
        #                              searched key if the key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete(self, key: KT):
        node, update_vector = self._locate_node(key)

        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to the removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()

            if level > self.level:
                # After a level increase we have to add additional references to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level

            new_node = Node(key, value)

            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through the new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])

                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key: KT) -> VT | None:
        node, _ = self._locate_node(key)
        if node is not None:
            return node.value
        return None


def test_insert():
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19


def test_insert_overrides_existing_value():
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)

    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)

    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10


def test_searching_empty_list_returns_none():
    skip_list = SkipList()
    assert skip_list.find("Some key") is None


def test_search():
    skip_list = SkipList()

    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20

    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)

    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13


def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
    skip_list.delete("Some key")

    assert len(skip_list.head.forward) == 0


def test_deleted_items_are_not_found_by_find_method():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    skip_list.delete("Key2")

    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None


def test_delete_removes_only_given_key():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None


def test_delete_doesnt_leave_dead_nodes():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)

    skip_list.delete("X")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4


def test_iter_always_yields_sorted_values():
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))


def pytests():
    for _ in range(100):
        # Repeat the tests 100 times due to the probabilistic nature of the
        # skip list: random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()

        test_searching_empty_list_returns_none()
        test_search()

        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_found_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()

        test_iter_always_yields_sorted_values()


def main():
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")

    skip_list.delete(4)

    print(skip_list)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
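
# Added usage sketch (not part of the original module): exercises the public
# API above. The expected O(log n) search cost comes from the geometric level
# distribution produced by random_level().
def demo_skip_list() -> None:
    sl = SkipList[int, str]()
    for key in (30, 10, 20):
        sl.insert(key, str(key))
    assert list(sl) == [10, 20, 30]  # iteration always yields keys in sorted order
    assert sl.find(20) == "20"
    sl.delete(20)
    assert sl.find(20) is None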
"""
Project Euler Problem 3: https://projecteuler.net/problem=3

Find the largest prime factor of 600851475143.
"""


def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n, found by trial division."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")

    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        ans = n
    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
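
# Added sanity check: the problem statement's example - the prime factors of
# 13195 are 5, 7, 13 and 29, so the largest one is 29.
assert solution(13195) == 29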
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_blenderbot": [
        "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotConfig",
        "BlenderbotOnnxConfig",
    ],
    "tokenization_blenderbot": ["BlenderbotTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot"] = [
        "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotForCausalLM",
        "BlenderbotForConditionalGeneration",
        "BlenderbotModel",
        "BlenderbotPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
        "TFBlenderbotForConditionalGeneration",
        "TFBlenderbotModel",
        "TFBlenderbotPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
        "FlaxBlenderbotForConditionalGeneration",
        "FlaxBlenderbotModel",
        "FlaxBlenderbotPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_blenderbot import (
        BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotConfig,
        BlenderbotOnnxConfig,
    )
    from .tokenization_blenderbot import BlenderbotTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_fast import BlenderbotTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot import (
            BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotForCausalLM,
            BlenderbotForConditionalGeneration,
            BlenderbotModel,
            BlenderbotPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot import (
            TFBlenderbotForConditionalGeneration,
            TFBlenderbotModel,
            TFBlenderbotPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot import (
            FlaxBlenderbotForConditionalGeneration,
            FlaxBlenderbotModel,
            FlaxBlenderbotPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
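
# Added illustration: _LazyModule defers the heavy framework imports until an
# attribute is first touched. A stripped-down, hypothetical version of the same
# idea using PEP 562 module-level __getattr__:
#
#   import importlib
#
#   _attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}
#
#   def __getattr__(name):
#       if name in _attr_to_module:
#           module = importlib.import_module("." + _attr_to_module[name], __name__)
#           return getattr(module, name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")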
"""
0-1 knapsack solved two ways: a memoized recursive formulation (memory
functions) and a bottom-up dynamic programming table, plus reconstruction of
one optimal subset.
"""


def mf_knapsack(i: int, wt: list, val: list, j: int):
    """
    This code involves the concept of memory functions. Here we solve only the
    subproblems that are actually needed, unlike the bottom-up version below.
    f is a 2D array filled with -1s.
    """
    global f  # a global dp table for the knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            cur_val = mf_knapsack(i - 1, wt, val, j)
        else:
            cur_val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = cur_val
    return f[i][j]


def knapsack(w: int, wt: list, val: list, n: int):
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w], dp


def knapsack_with_example_solution(w: int, wt: list, val: list):
    """
    Solve the knapsack problem and return both the optimal value and one
    optimal subset of item indices (1-based).
    """
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    # For the current item i at a maximum weight j to be part of an optimal
    # subset, the optimal value at (i, j) must be greater than the optimal
    # value at (i-1, j), where i - 1 means considering only the previous items
    # at the given maximum weight.
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
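
    # Added usage sketch: a second small instance. Item indices in the
    # returned subset are 1-based, matching _construct_solution above:
    # items 2 (w=4, v=40) and 4 (w=3, v=50) fit capacity 10 for value 90.
    extra_value, extra_subset = knapsack_with_example_solution(10, [5, 4, 6, 3], [10, 40, 30, 50])
    assert extra_value == 90
    assert extra_subset == {2, 4}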
from collections import namedtuple

import requests
from lxml import html  # type: ignore

covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = (
    "Total COVID-19 cases in the world: {}\n"
    "Total deaths due to COVID-19 in the world: {}\n"
    "Total COVID-19 patients recovered in the world: {}"
)
print(fmt.format(*covid_stats()))
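
# Added illustration: the scraper is coupled to worldometers' markup and will
# break silently if the page layout changes. The same xpath pattern applied to
# a static document:
_sample = '<html><body><div class = "maincounter-number"><span>42</span></div></body></html>'
assert html.fromstring(_sample).xpath('//div[@class = "maincounter-number"]/span/text()') == ["42"]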
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}


class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
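
# Added usage note: like any PretrainedConfig subclass, MraConfig can be built
# with overrides and round-tripped through JSON (sketch; "my-mra-config" is a
# placeholder directory):
#
#   config = MraConfig(num_hidden_layers=6)
#   config.save_pretrained("my-mra-config")            # writes config.json
#   config = MraConfig.from_pretrained("my-mra-config")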
from .integrations import (
    is_optuna_available,
    is_ray_available,
    is_sigopt_available,
    is_wandb_available,
    run_hp_search_optuna,
    run_hp_search_ray,
    run_hp_search_sigopt,
    run_hp_search_wandb,
)
from .trainer_utils import (
    HPSearchBackend,
    default_hp_space_optuna,
    default_hp_space_ray,
    default_hp_space_sigopt,
    default_hp_space_wandb,
)
from .utils import logging


logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials, direction, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
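
# Added usage sketch (model_init, training_args and train_ds are hypothetical
# placeholders): these backends are normally driven through
# Trainer.hyperparameter_search rather than used directly:
#
#   trainer = Trainer(model_init=model_init, args=training_args, train_dataset=train_ds)
#   best_run = trainer.hyperparameter_search(
#       direction="minimize",
#       backend="optuna",   # any key of ALL_HYPERPARAMETER_SEARCH_BACKENDS
#       n_trials=20,
#   )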
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
    "tokenization_roberta": ["RobertaTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roberta"] = [
        "ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RobertaForCausalLM",
        "RobertaForMaskedLM",
        "RobertaForMultipleChoice",
        "RobertaForQuestionAnswering",
        "RobertaForSequenceClassification",
        "RobertaForTokenClassification",
        "RobertaModel",
        "RobertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roberta"] = [
        "TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRobertaForCausalLM",
        "TFRobertaForMaskedLM",
        "TFRobertaForMultipleChoice",
        "TFRobertaForQuestionAnswering",
        "TFRobertaForSequenceClassification",
        "TFRobertaForTokenClassification",
        "TFRobertaMainLayer",
        "TFRobertaModel",
        "TFRobertaPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roberta"] = [
        "FlaxRobertaForCausalLM",
        "FlaxRobertaForMaskedLM",
        "FlaxRobertaForMultipleChoice",
        "FlaxRobertaForQuestionAnswering",
        "FlaxRobertaForSequenceClassification",
        "FlaxRobertaForTokenClassification",
        "FlaxRobertaModel",
        "FlaxRobertaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
    from .tokenization_roberta import RobertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roberta_fast import RobertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta import (
            ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaForCausalLM,
            RobertaForMaskedLM,
            RobertaForMultipleChoice,
            RobertaForQuestionAnswering,
            RobertaForSequenceClassification,
            RobertaForTokenClassification,
            RobertaModel,
            RobertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta import (
            TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaForCausalLM,
            TFRobertaForMaskedLM,
            TFRobertaForMultipleChoice,
            TFRobertaForQuestionAnswering,
            TFRobertaForSequenceClassification,
            TFRobertaForTokenClassification,
            TFRobertaMainLayer,
            TFRobertaModel,
            TFRobertaPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta import (
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaModel,
            FlaxRobertaPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List

import torch
from tqdm import tqdm

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params


logger = getLogger(__name__)
DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"


def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Save model.generate results to <out_file>, and return how long it took."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}


def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")


def run_generate(verbose=True):
    """Generate outputs for input_path and, if a reference is given, compute BLEU/ROUGE."""
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )
    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))
    return scores


if __name__ == "__main__":
    # Usage for MT:
    # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
    run_generate(verbose=True)
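
    # Added usage note for summarization (mirrors the MT example above; the
    # data paths are placeholders):
    # python run_eval.py facebook/bart-large-cnn cnn_dm/test.source preds.txt \
    #     --reference_path cnn_dm/test.target --score_path rouge.json --task summarization --bs 16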
import torch

from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device

from .test_schedulers import SchedulerCommonTest


class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
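
# Added note: the full-loop tests above mirror how the scheduler is driven at
# inference time. A bare-bones denoising loop (sketch; the zero tensor stands
# in for a UNet call):
#
#   scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(10)
#   sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       model_output = torch.zeros_like(sample)  # UNet(model_input, t) in practice
#       sample = scheduler.step(model_output, t, sample).prev_sample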
"""
Project Euler Problem 9: https://projecteuler.net/problem=9

Special Pythagorean triplet: find the product a * b * c of the Pythagorean
triplet for which a + b + c = 1000.
"""


def solution(n: int = 1000) -> int:
    """Return the largest product a*b*c over Pythagorean triplets with a + b + c == n."""
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2 + b**2 = c**2 and a + b + c = n, eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product


if __name__ == "__main__":
    print(f"{solution() = }")
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
        "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXJapaneseForCausalLM",
        "GPTNeoXJapaneseLayer",
        "GPTNeoXJapaneseModel",
        "GPTNeoXJapanesePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
    from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox_japanese import (
            GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXJapaneseForCausalLM,
            GPTNeoXJapaneseLayer,
            GPTNeoXJapaneseModel,
            GPTNeoXJapanesePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import os
import socket
from contextlib import contextmanager

import torch

from ..commands.config.default import write_basic_config  # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version


if is_deepspeed_available():
    from deepspeed import DeepSpeedEngine

if is_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm


def is_compiled_module(module):
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Extract a model from its distributed containers."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model


def wait_for_everyone():
    """Introduce a blocking point in the script, making sure all processes reach it first."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save the data to disk, using xm.save on TPU and torch.save (rank 0 only) elsewhere."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """Temporarily set upper-cased environment variables, removing them on exit."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    """Return a human-readable name for an object, class, or function."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merge `source` into `destination`, returning `destination`."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port: int = None) -> bool:
    """Check whether a localhost port (default 29500) is already bound."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
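
# Added usage sketch for patch_environment (assumes MASTER_PORT is not already
# set): keys are upper-cased and values stringified on entry, then scrubbed on
# exit.
def _demo_patch_environment() -> None:
    with patch_environment(master_port=29501):
        assert os.environ["MASTER_PORT"] == "29501"
    assert "MASTER_PORT" not in os.environ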
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class __snake_case :
def __init__( self : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple=1_3 , __lowerCAmelCase : Any=2 , __lowerCAmelCase : List[str]=2_4 , __lowerCAmelCase : str=1_6 , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Optional[Any]=3_2 , __lowerCAmelCase : List[Any]=5 , __lowerCAmelCase : int=4 , __lowerCAmelCase : int=3_7 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : str=0.1 , __lowerCAmelCase : int=1_0 , __lowerCAmelCase : List[Any]=0.02 , __lowerCAmelCase : str=None , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Union[str, Any]=2 , ):
"""simple docstring"""
_lowerCamelCase : List[str] = parent
_lowerCamelCase : str = batch_size
_lowerCamelCase : Tuple = patch_size
_lowerCamelCase : Optional[int] = max_length
_lowerCamelCase : List[Any] = num_mel_bins
_lowerCamelCase : int = is_training
_lowerCamelCase : Union[str, Any] = use_labels
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : Tuple = num_hidden_layers
_lowerCamelCase : int = num_attention_heads
_lowerCamelCase : Tuple = intermediate_size
_lowerCamelCase : List[str] = hidden_act
_lowerCamelCase : Dict = hidden_dropout_prob
_lowerCamelCase : int = attention_probs_dropout_prob
_lowerCamelCase : List[Any] = type_sequence_label_size
_lowerCamelCase : Tuple = initializer_range
_lowerCamelCase : List[str] = scope
_lowerCamelCase : Optional[int] = frequency_stride
_lowerCamelCase : List[Any] = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
_lowerCamelCase : Optional[int] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
_lowerCamelCase : Union[str, Any] = (self.max_length - self.patch_size) // self.time_stride + 1
_lowerCamelCase : Any = frequency_out_dimension * time_out_dimension
_lowerCamelCase : List[Any] = num_patches + 2
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : int = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
_lowerCamelCase : str = None
if self.use_labels:
_lowerCamelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase : Optional[int] = self.get_config()
return config, input_values, labels
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : List[Any] = ASTModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : List[Any] = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : int = self.prepare_config_and_inputs()
(
(
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) ,
) : Optional[Any] = config_and_inputs
_lowerCamelCase : int = {'''input_values''': input_values}
return config, inputs_dict
@require_torch
class __snake_case ( _lowercase , _lowercase , unittest.TestCase):
snake_case__ : List[Any] = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
snake_case__ : Tuple = (
{"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
if is_torch_available()
else {}
)
snake_case__ : Any = False
snake_case__ : List[Any] = False
snake_case__ : Optional[Any] = False
snake_case__ : Optional[Any] = False
def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = ASTModelTester(self )
_lowerCamelCase : Any = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=3_7 )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''AST does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Dict = model_class(__lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCamelCase : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCAmelCase , nn.Linear ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : str = model_class(__lowerCAmelCase )
_lowerCamelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : Any = [*signature.parameters.keys()]
_lowerCamelCase : str = ['''input_values''']
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Union[str, Any] = ASTModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : List[str] = hf_hub_download(
repo_id='''nielsr/audio-spectogram-transformer-checkpoint''', filename='''sample_audio.flac''', repo_type='''dataset''' )
_lowerCamelCase , _lowerCamelCase : str = torchaudio.load(A_ )
return audio, sampling_rate
@require_torch
@require_torchaudio
class __snake_case ( unittest.TestCase):
@cached_property
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
return (
ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' )
if is_torchaudio_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : int = self.default_feature_extractor
_lowerCamelCase : Union[str, Any] = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(__lowerCAmelCase )
_lowerCamelCase : List[Any] = self.default_feature_extractor
_lowerCamelCase , _lowerCamelCase : List[Any] = prepare_audio()
_lowerCamelCase : Dict = audio.squeeze().numpy()
_lowerCamelCase : Tuple = feature_extractor(__lowerCAmelCase , sampling_rate=__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
_lowerCamelCase : Tuple = model(**__lowerCAmelCase )
# verify the logits
_lowerCamelCase : Tuple = torch.Size((1, 5_2_7) )
self.assertEqual(outputs.logits.shape , __lowerCAmelCase )
_lowerCamelCase : Optional[int] = torch.tensor([-0.87_60, -7.00_42, -8.66_02] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1E-4 ) )
"""simple docstring"""
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
SCREAMING_SNAKE_CASE = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
SCREAMING_SNAKE_CASE = logging.WARNING
def __lowerCAmelCase( ):
"""simple docstring"""
_lowercase : List[Any] = os.getenv('DATASETS_VERBOSITY' ,__UpperCAmelCase )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F'''Unknown option DATASETS_VERBOSITY={env_level_str}, '''
F'''has to be one of: { ', '.join(log_levels.keys() ) }''' )
return _default_log_level
def __lowerCAmelCase( ):
"""simple docstring"""
return __name__.split('.' )[0]
def __lowerCAmelCase( ):
"""simple docstring"""
return logging.getLogger(_get_library_name() )
def __lowerCAmelCase( ):
"""simple docstring"""
_lowercase : Union[str, Any] = _get_library_root_logger()
library_root_logger.setLevel(_get_default_logging_level() )
def __lowerCAmelCase( ):
"""simple docstring"""
_lowercase : Any = _get_library_root_logger()
library_root_logger.setLevel(logging.NOTSET )
def __lowerCAmelCase( __UpperCAmelCase = None ):
"""simple docstring"""
if name is None:
_lowercase : str = _get_library_name()
return logging.getLogger(__UpperCAmelCase )
def __lowerCAmelCase( ):
"""simple docstring"""
return _get_library_root_logger().getEffectiveLevel()
def __lowerCAmelCase( __UpperCAmelCase ):
"""simple docstring"""
_get_library_root_logger().setLevel(__UpperCAmelCase )
def __lowerCAmelCase( ):
"""simple docstring"""
return set_verbosity(__UpperCAmelCase )
def __lowerCAmelCase( ):
"""simple docstring"""
return set_verbosity(__UpperCAmelCase )
def __lowerCAmelCase( ):
"""simple docstring"""
return set_verbosity(__UpperCAmelCase )
def __lowerCAmelCase( ):
"""simple docstring"""
return set_verbosity(__UpperCAmelCase )
def __lowerCAmelCase( ):
"""simple docstring"""
_lowercase : Union[str, Any] = False
def __lowerCAmelCase( ):
"""simple docstring"""
_lowercase : int = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class _lowerCamelCase :
def __init__( self : Optional[Any] , *lowerCamelCase_ : Union[str, Any] , **lowerCamelCase_ : List[Any] ): # pylint: disable=unused-argument
"""simple docstring"""
_lowercase : List[str] = args[0] if args else None
def __iter__( self : Any ):
"""simple docstring"""
return iter(self._iterator )
def __getattr__( self : Optional[Any] , lowerCamelCase_ : str ):
"""simple docstring"""
def empty_fn(*lowerCamelCase_ : int , **lowerCamelCase_ : Tuple ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : str ):
"""simple docstring"""
return self
def __exit__( self : List[str] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Any , lowerCamelCase_ : int ):
"""simple docstring"""
return
SCREAMING_SNAKE_CASE = True
class _lowerCamelCase :
def __call__( self : Optional[int] , *lowerCamelCase_ : List[str] , lowerCamelCase_ : str=False , **lowerCamelCase_ : Dict ):
"""simple docstring"""
if _tqdm_active and not disable:
return tqdm_lib.tqdm(*lowerCamelCase_ , **lowerCamelCase_ )
else:
return EmptyTqdm(*lowerCamelCase_ , **lowerCamelCase_ )
def __UpperCAmelCase ( self : Dict , *lowerCamelCase_ : Union[str, Any] , **lowerCamelCase_ : Optional[Any] ):
"""simple docstring"""
_lowercase : Dict = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*lowerCamelCase_ , **lowerCamelCase_ )
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
SCREAMING_SNAKE_CASE = _tqdm_cls()
def __lowerCAmelCase( ):
"""simple docstring"""
global _tqdm_active
return bool(_tqdm_active )
def __lowerCAmelCase( ):
"""simple docstring"""
global _tqdm_active
_lowercase : List[str] = True
def __lowerCAmelCase( ):
"""simple docstring"""
global _tqdm_active
_lowercase : List[Any] = False
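
# Added usage sketch: client code typically tunes the library root logger and
# the progress bars through this facade (the module path datasets.utils.logging
# is an assumption based on the imports above):
#
#   from datasets.utils import logging as ds_logging
#
#   ds_logging.set_verbosity_info()
#   logger = ds_logging.get_logger(__name__)
#   logger.info("now visible at INFO level")
#   ds_logging.disable_progress_bar()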
"""simple docstring"""
import math
def __lowerCAmelCase( __UpperCAmelCase ):
"""simple docstring"""
if not isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
_lowercase : List[Any] = F'''Input value of [number={number}] must be an integer'''
raise TypeError(__UpperCAmelCase )
if number < 1:
_lowercase : List[Any] = F'''Input value of [number={number}] must be > 0'''
raise ValueError(__UpperCAmelCase )
elif number == 1:
return 3
elif number == 2:
return 5
else:
_lowercase : str = int(math.log(number // 3 ,2 ) ) + 2
_lowercase : Union[str, Any] = [3, 5]
_lowercase : Optional[int] = 2
_lowercase : List[Any] = 3
for block in range(1 ,__UpperCAmelCase ):
for _ in range(__UpperCAmelCase ):
proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
proth_index += 1
increment *= 2
return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
SCREAMING_SNAKE_CASE = 0
try:
SCREAMING_SNAKE_CASE = proth(number)
except ValueError:
print(f"""ValueError: there is no {number}th Proth number""")
continue
print(f"""The {number}th Proth number: {value}""")
import argparse
import pickle

import numpy as np
import torch
from torch import nn

from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging


logging.set_verbosity_info()


def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )


def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    out_layer_norm_weight = np.asarray(weights[7][0])
    out_layer_norm_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(out_layer_norm_weight),
        torch.tensor(out_layer_norm_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )


def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained Reformer model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
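# Example invocation (added for illustration; the flag names match the argparse
# definitions above, the script name and file paths are placeholders):
#
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path /path/to/model.pkl \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin
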
# Author: OMKAR PATHAK, Nwachukwu Chidiebere

# Use a Python dictionary to construct the graph.
from __future__ import annotations

from pprint import pformat
from typing import Generic, TypeVar

T = TypeVar("T")


class GraphAdjacencyList(Generic[T]):
    """Adjacency-list graph; directed by default, undirected when directed=False."""

    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        """Connect source_vertex to destination_vertex, creating vertices as needed."""
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex; also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []

        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
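# Usage sketch (added; not part of the original module): build a directed and an
# undirected graph and print their adjacency lists.
if __name__ == "__main__":
    directed_graph = GraphAdjacencyList()
    directed_graph.add_edge(0, 1).add_edge(1, 2)
    print(directed_graph)  # {0: [1], 1: [2], 2: []}

    undirected_graph = GraphAdjacencyList(directed=False)
    undirected_graph.add_edge("a", "b")
    print(undirected_graph)  # {'a': ['b'], 'b': ['a']}
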
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """Fourth-order Improved Pseudo Linear Multistep (iPNDM) scheduler."""

    order = 1

    @register_to_config
    def __init__(
        self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None
    ):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        # linear multistep coefficients: fall back to lower-order formulas until
        # four model evaluations have been collected
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
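# Minimal usage sketch (added, not from the original source): drives the scheduler
# with a dummy "model" that returns zeros, just to show the set_timesteps/step loop.
# The tensor shape is made up for illustration.
#
#     scheduler = IPNDMScheduler()
#     scheduler.set_timesteps(num_inference_steps=50)
#     sample = torch.randn(1, 3, 8, 8)  # would normally be the initial noise
#     for t in scheduler.timesteps:
#         model_output = torch.zeros_like(sample)  # a real model would predict this
#         sample = scheduler.step(model_output, t, sample).prev_sample
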
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """Traverse the state-space tree depth-first, printing one permutation of
    `sequence` at each leaf (i.e. when index == len(sequence))."""
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_2)
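# Variant sketch (added, not part of the original): the same backtracking search
# can collect the permutations instead of printing them; `collect_all_permutations`
# is a name introduced here for illustration.
def collect_all_permutations(sequence: list[int | str]) -> list[list[int | str]]:
    results: list[list[int | str]] = []

    def backtrack(current: list[int | str], used: list[bool]) -> None:
        if len(current) == len(sequence):
            results.append(current.copy())  # copy, because `current` keeps mutating
            return
        for i, item in enumerate(sequence):
            if not used[i]:
                used[i] = True
                current.append(item)
                backtrack(current, used)
                current.pop()
                used[i] = False

    backtrack([], [False] * len(sequence))
    return results


print(collect_all_permutations(["x", "y"]))  # [['x', 'y'], ['y', 'x']]
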
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ : int = logging.get_logger(__name__)
A__ : List[Any] = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class lowercase__ ( snake_case__ ):
_UpperCAmelCase :Any = """segformer"""
def __init__( self : Optional[Any] , snake_case__ : Dict=3 , snake_case__ : int=4 , snake_case__ : Tuple=[2, 2, 2, 2] , snake_case__ : int=[8, 4, 2, 1] , snake_case__ : Optional[int]=[32, 64, 160, 256] , snake_case__ : List[str]=[7, 3, 3, 3] , snake_case__ : Optional[Any]=[4, 2, 2, 2] , snake_case__ : List[str]=[1, 2, 5, 8] , snake_case__ : Optional[int]=[4, 4, 4, 4] , snake_case__ : Optional[int]="gelu" , snake_case__ : Optional[Any]=0.0 , snake_case__ : int=0.0 , snake_case__ : List[Any]=0.1 , snake_case__ : Dict=0.02 , snake_case__ : List[str]=0.1 , snake_case__ : Optional[Any]=1E-6 , snake_case__ : str=256 , snake_case__ : int=255 , **snake_case__ : List[str] , ):
super().__init__(**snake_case__ )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
" removed, as the behaviour will default to that of reshape_last_stage = True." , snake_case__ , )
lowerCamelCase_ : Optional[int] =num_channels
lowerCamelCase_ : Tuple =num_encoder_blocks
lowerCamelCase_ : Optional[int] =depths
lowerCamelCase_ : Tuple =sr_ratios
lowerCamelCase_ : List[Any] =hidden_sizes
lowerCamelCase_ : int =patch_sizes
lowerCamelCase_ : List[str] =strides
lowerCamelCase_ : List[Any] =mlp_ratios
lowerCamelCase_ : List[Any] =num_attention_heads
lowerCamelCase_ : Any =hidden_act
lowerCamelCase_ : Optional[Any] =hidden_dropout_prob
lowerCamelCase_ : int =attention_probs_dropout_prob
lowerCamelCase_ : Any =classifier_dropout_prob
lowerCamelCase_ : str =initializer_range
lowerCamelCase_ : Optional[int] =drop_path_rate
lowerCamelCase_ : Optional[Any] =layer_norm_eps
lowerCamelCase_ : int =decoder_hidden_size
lowerCamelCase_ : Optional[int] =kwargs.get("reshape_last_stage" , snake_case__ )
lowerCamelCase_ : int =semantic_loss_ignore_index
class lowercase__ ( snake_case__ ):
_UpperCAmelCase :Union[str, Any] = version.parse("1.11" )
@property
def UpperCAmelCase__ ( self : str ):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def UpperCAmelCase__ ( self : List[Any] ):
return 1E-4
@property
def UpperCAmelCase__ ( self : List[str] ):
return 12
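# Usage sketch (added): instantiating the config directly gives the SegFormer-B0
# defaults defined above; keyword arguments override them, and `num_labels` is
# handled by the PretrainedConfig base class.
#
#     from transformers import SegformerConfig
#
#     config = SegformerConfig(num_labels=150)  # e.g. for ADE20K semantic segmentation
#     print(config.hidden_sizes)  # [32, 64, 160, 256]
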
def bfs(graph, source, sink, parent):
    """Breadth-first search for an augmenting path; fills `parent` and returns
    True if the sink is reachable in the residual graph."""
    visited = [False] * len(graph)
    queue = []
    queue.append(source)
    visited[source] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[sink]


def ford_fulkerson(graph, source, sink):
    """Compute the maximum flow from source to sink on a capacity matrix."""
    parent = [-1] * (len(graph))
    max_flow = 0

    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow

        # Update the residual capacities along the path
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]

source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
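# Note (added): because the augmenting path is found with breadth-first search,
# this is the Edmonds-Karp variant of Ford-Fulkerson, with O(V * E^2) runtime.
# For the sample graph above (the classic CLRS example network) the printed
# maximum flow should be 23.
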
"""Functions for downloading and reading MNIST data (deprecated)."""

import collections
import gzip
import os
import urllib.request

import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated

_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"


def _read32(bytestream):
    # MNIST files store sizes as big-endian 32-bit unsigned integers
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError("Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data


@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError("Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels


class _DataSet:
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]


@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download the data from the source url, unless it's already here."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath


@deprecated(None, "Please use alternatives such as: tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
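# Usage sketch (added; downloads the MNIST archives on first run, so it is left
# as a comment; the target directory is a placeholder):
#
#     mnist = read_data_sets("/tmp/mnist_data", one_hot=True)
#     images, labels = mnist.train.next_batch(100)
#     print(images.shape, labels.shape)  # (100, 784) (100, 10)
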
"""Convert mLUKE checkpoint."""

import argparse
import json
import os
from collections import OrderedDict

import torch

from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken


@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
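# Example invocation (added for illustration; the flag names match the argparse
# definitions above, the script name and file paths are placeholders):
#
#   python convert_mluke_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path mluke.bin --metadata_path metadata.json \
#       --entity_vocab_path entity_vocab.jsonl \
#       --pytorch_dump_folder_path ./mluke-base --model_size base
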
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "caidas/swin2sr-classicalsr-x2-64": (
        "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
    ),
}


class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
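# Usage sketch (added): the defaults above correspond to a classical-SR x2 setup;
# overriding `upscale` is the main knob for other scales, and the attribute_map
# lets the generic names resolve to the Swin2SR-specific ones.
#
#     from transformers import Swin2SRConfig
#
#     config = Swin2SRConfig(upscale=4)
#     print(config.num_hidden_layers)  # 6, resolved via attribute_map to num_layers
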
from __future__ import annotations

arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """
    Get the Next Greatest Element (NGE) for each element by scanning all
    subsequent elements: O(n^2).

    >>> next_greatest_element_slow(arr) == expect
    True
    """
    result = []
    arr_size = len(arr)

    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """
    Like next_greatest_element_slow() but uses enumerate() and slicing
    instead of explicit indexing; still O(n^2).

    >>> next_greatest_element_fast(arr) == expect
    True
    """
    result = []
    for i, outer in enumerate(arr):
        next_element: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_element = inner
                break
        result.append(next_element)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """
    Get the Next Greatest Element (NGE) for each element with a stack,
    in a single right-to-left pass.

    >>> next_greatest_element(arr) == expect
    True
    """
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        "     next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
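# Added sanity check (not in the original): all three implementations must agree
# on the module-level test data. The brute-force versions are O(n^2); the
# stack-based version is O(n), since each element is pushed and popped at most once.
if __name__ == "__main__":
    assert next_greatest_element_slow(arr) == expect
    assert next_greatest_element_fast(arr) == expect
    assert next_greatest_element(arr) == expect
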
from __future__ import annotations

import unittest

from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers.models.xglm.modeling_tf_xglm import (
        TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFXGLMForCausalLM,
        TFXGLMModel,
    )


@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict


@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()


@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
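# Note (added): these tests follow the standard transformers test layout; assuming
# the usual repository checkout, they can be run with, e.g.,
#
#   python -m pytest tests/models/xglm/test_modeling_tf_xglm.py
#
# The @slow tests download facebook/xglm-564M and only run when RUN_SLOW=1 is set.
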
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging


if TYPE_CHECKING:
    from ...onnx.config import PatchingSpec
    from ...tokenization_utils_base import PreTrainedTokenizerBase


logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
    "allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
    "allenai/longformer-large-4096-finetuned-triviaqa": (
        "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
    ),
    "allenai/longformer-base-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
    "allenai/longformer-large-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
}


class LongformerConfig(PretrainedConfig):
    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export


class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
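# Usage sketch (added, illustrative rather than taken from the original file):
# building the ONNX export config for a Longformer model.
#
#     from transformers import LongformerConfig
#
#     config = LongformerConfig(attention_window=256)
#     onnx_config = LongformerOnnxConfig(config)
#     print(onnx_config.default_onnx_opset)  # at least 14, for the tril operator
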
import warnings

from ...utils import is_sklearn_available, requires_backends


if is_sklearn_available():
    from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef


DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)


def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
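# Usage sketch (added):
#
#     import numpy as np
#
#     preds = np.array([1, 0, 1, 1])
#     labels = np.array([1, 0, 0, 1])
#     print(glue_compute_metrics("mrpc", preds, labels))
#     # {'acc': 0.75, 'f1': 0.8, 'acc_and_f1': 0.775}  (up to float rounding)
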
def solution(max_perimeter: int = 10**9) -> int:
    """Return the sum of all generated perimeters that do not exceed `max_perimeter`."""
    prev_value = 1
    value = 2

    perimeters_sum = 0
    i = 0
    perimeter = 0

    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum


if __name__ == "__main__":
    print(f"{solution() = }")
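# Added note: this appears to be a solution to Project Euler problem 94 (sum of
# the perimeters of almost equilateral Heronian triangles up to the limit). The
# Pell-like recurrence on (prev_value, value) walks through the valid triangles,
# and the alternating `2 * value + 2` / `2 * value - 2` terms are the perimeters
# of the two triangle families (sides n, n, n + 1 and n, n, n - 1).
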
import argparse
import copy
def generate_neighbours(path: str) -> dict:
    '''simple docstring'''
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]])
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]])
    return dict_of_neighbours
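# Illustrative (input format assumed from the parsing above: one weighted edge per
# line, "node_a node_b distance"): a file containing "a b 20" and "a c 18" yields
# {"a": [["b", "20"], ["c", "18"]], "b": [["a", "20"]], "c": [["a", "18"]]}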
def generate_first_solution(path: str, dict_of_neighbours: dict):
    '''simple docstring'''
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution: list, dict_of_neighbours: dict) -> list:
    '''simple docstring'''
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
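# Note: each entry of the returned neighborhood is a candidate tour with two cities
# swapped and its total distance appended as the last element, sorted best-first.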
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters: int, size: int):
    '''simple docstring'''
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost
def main(args=None) -> None:
    '''simple docstring'''
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours)
    best_sol, best_cost = tabu_search(
        first_solution, distance_of_first_solution, dict_of_neighbours, args.Iterations, args.Size, )
    print(f'Best solution: {best_sol}, with total distance: {best_cost}.')
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"-f",
"--File",
type=str,
help="Path to the file containing the data",
required=True,
)
parser.add_argument(
"-i",
"--Iterations",
type=int,
help="How many iterations the algorithm should perform",
required=True,
)
parser.add_argument(
"-s", "--Size", type=int, help="Size of the tabu list", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
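# Example invocation (illustrative file name; the data file must follow the
# edge-list format assumed by generate_neighbours above):
#   python tabu_search.py -f tabu_test_data.txt -i 4 -s 3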
| 709 |
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class A ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = ReformerTokenizer
lowerCamelCase = ReformerTokenizerFast
lowerCamelCase = True
lowerCamelCase = False
lowerCamelCase = True
    def setUp( self : Any )-> Union[str, Any]:
        '''simple docstring'''
        super().setUp()

        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self : int )-> int:
        '''simple docstring'''
        token = '<s>'
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ), token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ), token )
    def test_get_vocab( self : List[str] )-> Dict:
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0], '<unk>' )
        self.assertEqual(vocab_keys[1], '<s>' )
        self.assertEqual(vocab_keys[-1], 'j' )
        self.assertEqual(len(vocab_keys ), 1_0_0_0 )
    def test_vocab_size( self : Any )-> int:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size,1_0_0_0 )
    def test_rust_and_python_full_tokenizers( self : Any )-> Tuple:
        '''simple docstring'''
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = 'I was born in 92000, and this is falsé.'

        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens, rust_tokens )

        ids = tokenizer.encode(sequence, add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False )
        self.assertListEqual(ids, rust_ids )

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids, rust_ids )
def snake_case__ ( self : str,lowercase_ : int=1_5 )-> Union[str, Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
A__ = self.rust_tokenizer_class.from_pretrained(lowercase_,**lowercase_ )
# Simple input
A__ = 'This is a simple input'
A__ = ['This is a simple input 1', 'This is a simple input 2']
A__ = ('This is a simple input', 'This is a pair')
A__ = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(lowercase_,tokenizer_r.encode,lowercase_,max_length=lowercase_,padding='max_length' )
# Simple input
self.assertRaises(lowercase_,tokenizer_r.encode_plus,lowercase_,max_length=lowercase_,padding='max_length' )
# Simple input
self.assertRaises(
lowercase_,tokenizer_r.batch_encode_plus,lowercase_,max_length=lowercase_,padding='max_length',)
# Pair input
self.assertRaises(lowercase_,tokenizer_r.encode,lowercase_,max_length=lowercase_,padding='max_length' )
# Pair input
self.assertRaises(lowercase_,tokenizer_r.encode_plus,lowercase_,max_length=lowercase_,padding='max_length' )
# Pair input
self.assertRaises(
lowercase_,tokenizer_r.batch_encode_plus,lowercase_,max_length=lowercase_,padding='max_length',)
def snake_case__ ( self : Any )-> Optional[Any]:
'''simple docstring'''
pass
    def test_full_tokenizer( self : Dict )-> Dict:
        '''simple docstring'''
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True )

        tokens = tokenizer.tokenize('This is a test' )
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'] )

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ), [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2], )

        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
],)
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids, [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4], )

        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
],)
@cached_property
def snake_case__ ( self : str )-> Optional[int]:
'''simple docstring'''
return ReformerTokenizer.from_pretrained('google/reformer-crime-and-punishment' )
@slow
    def test_tokenization_base_easy_symbols( self : Optional[Any] )-> int:
        '''simple docstring'''
        symbols = 'Hello World!'
        original_tokenizer_encodings = [1_2_6, 3_2, 2_6_2, 1_5_2, 3_8, 7_2, 2_8_7]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenization_base_hard_symbols( self : Dict )-> Dict:
        '''simple docstring'''
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
        )
        original_tokenizer_encodings = [
1_0_8,
2_6_5,
2_4,
1_1_1,
4,
2_5_8,
1_5_6,
3_5,
2_8,
2_7_5,
3,
2_5_9,
2_9_7,
2_6_0,
8_4,
4,
3_5,
1_1_0,
4_4,
8,
2_5_9,
9_1,
2_6_8,
2_1,
1_1,
2_0_9,
2_7_4,
1_0_9,
2_6_6,
2_7_7,
1_1_7,
8_6,
9_3,
3_1_5,
2_5_8,
2_7_8,
2_5_8,
2_7_7,
2_5_8,
0,
2_5_8,
2_8_8,
2_5_8,
3_1_9,
2_5_8,
0,
2_5_8,
0,
2_5_8,
0,
2_5_8,
0,
2_5_8,
2_8_7,
2_5_8,
3_1_5,
2_5_8,
2_8_9,
2_5_8,
2_7_8,
9_9,
2_6_9,
2_6_6,
2_6_2,
8,
2_5_9,
2_4_1,
4,
2_1_7,
2_3_0,
2_6_8,
2_6_6,
5_5,
1_6_8,
1_0_6,
7_5,
1_9_3,
2_6_6,
2_2_3,
2_7,
4_9,
2_6,
2_8_2,
2_5,
2_6_4,
2_9_9,
1_9,
2_6,
0,
2_5_8,
2_7_7,
1_1_7,
8_6,
9_3,
1_7_6,
1_8_3,
2_7_0,
1_1,
2_6_2,
4_2,
6_1,
2_6_5,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols ) )
@require_torch
@slow
def snake_case__ ( self : Union[str, Any] )-> int:
'''simple docstring'''
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
A__ = list(self.big_tokenizer.get_vocab().keys() )[:1_0]
A__ = ' '.join(lowercase_ )
A__ = self.big_tokenizer.encode_plus(lowercase_,return_tensors='pt' )
A__ = self.big_tokenizer.batch_encode_plus([sequence, sequence],return_tensors='pt' )
A__ = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
A__ = encoded_sequence['input_ids'].shape
A__ = ReformerModel(lowercase_ )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowercase_ )
model(**lowercase_ )
@slow
def snake_case__ ( self : Tuple )-> Dict:
'''simple docstring'''
A__ = {'input_ids': [[1_0_8, 2_6_5, 2_4, 1_1_1, 4, 2_5_8, 1_5_6, 7, 5_1, 2_7_9, 5_8, 7, 7_6, 2_5, 6_9, 2_7_8], [1_4_0, 2_4_3, 2_6_4, 1_3_4, 1_7, 2_6_7, 7_7, 2_6_3, 2_2, 2_6_2, 2_9_7, 2_5_8, 3_0_4, 1_7_7, 2_7_9, 2_6_6, 1_4, 8_9, 1_3, 3_5, 2_6_1, 2_9_9, 2_7_2, 1_3_7, 2_7_5, 2_7_8]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
A__ = [
'This is a very simple sentence.',
'The quick brown fox jumps over the lazy dog.',
]
self.tokenizer_integration_test_util(
expected_encoding=lowercase_,model_name='google/reformer-crime-and-punishment',revision='0e6c3decb8211d49bf881013425dc8b0448b3f5a',padding=lowercase_,sequences=lowercase_,)
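# Quick usage sketch of the checkpoint exercised above (illustrative, not part of the tests):
#   tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#   tok.encode("Hello World!")  # -> [126, 32, 262, 152, 38, 72, 287], per the slow test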
| 586 | 0 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    '''simple docstring'''
    parser = ArgumentParser(
        description=(
            """PyTorch TPU distributed training launch """
            """helper utility that will spawn up """
            """multiple distributed processes"""
        ) )
    # Optional arguments for the launch helper
    parser.add_argument("""--num_cores""", type=int, default=1, help="""Number of TPU cores to use (1 or 8).""" )
    # positional
    parser.add_argument(
        """training_script""", type=str, help=(
            """The full path to the single TPU training """
            """program/script to be launched in parallel, """
            """followed by all the arguments for the """
            """training script"""
        ), )
    # rest from the training program
    parser.add_argument("""training_script_args""", nargs=REMAINDER )
    return parser.parse_args()
def main():
    '''simple docstring'''
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores )
if __name__ == "__main__":
main()
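# Example invocation (illustrative script name and arguments):
#   python xla_spawn.py --num_cores 8 your_training_script.py --arg1 --arg2
# The wrapped script must expose an `_mp_fn(index)` entry point for xmp.spawn.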
| 513 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase ):
    """simple docstring"""
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
[
"""num_inference_steps""",
"""num_waveforms_per_prompt""",
"""generator""",
"""latents""",
"""output_type""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
    def get_dummy_components(self ):
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D"""), up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D"""), cross_attention_dim=(32, 64), class_embed_type="""simple_projection""", projection_class_embeddings_input_dim=32, class_embeddings_concat=True, )
        scheduler = DDIMScheduler(
            beta_start=0.0_0085, beta_end=0.012, beta_schedule="""scaled_linear""", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=1, out_channels=1, down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""], up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""], latent_channels=4, )
        torch.manual_seed(0 )
        text_encoder_config = ClapTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=10_00, projection_dim=32, )
        text_encoder = ClapTextModelWithProjection(text_encoder_config )
        tokenizer = RobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-roberta""", model_max_length=77 )
        vocoder_config = SpeechTaHifiGanConfig(
            model_in_dim=8, sampling_rate=1_60_00, upsample_initial_channel=16, upsample_rates=[2, 2], upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False, )
        vocoder = SpeechTaHifiGan(vocoder_config )

        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """vocoder""": vocoder,
        }
        return components
    def get_dummy_inputs(self, device, seed=0 ):
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'A hammer hitting a wooden surface',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
        }
        return inputs
    def test_audioldm_ddim(self ):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components )
        audioldm_pipe = audioldm_pipe.to(device )
        audioldm_pipe.set_progress_bar_config(disable=None )

        inputs = self.get_dummy_inputs(device )
        output = audioldm_pipe(**inputs )
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio ) == 2_56

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )

        assert np.abs(audio_slice - expected_slice ).max() < 1E-2
    def test_audioldm_prompt_embeds(self ):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components )
        audioldm_pipe = audioldm_pipe.to(torch_device )
        audioldm_pipe = audioldm_pipe.to(torch_device )
        audioldm_pipe.set_progress_bar_config(disable=None )

        inputs = self.get_dummy_inputs(torch_device )
        inputs['prompt'] = 3 * [inputs['prompt']]

        # forward
        output = audioldm_pipe(**inputs )
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device )
        prompt = 3 * [inputs.pop('prompt' )]

        text_inputs = audioldm_pipe.tokenizer(
            prompt, padding='max_length', max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors='pt', )
        text_inputs = text_inputs['input_ids'].to(torch_device )

        prompt_embeds = audioldm_pipe.text_encoder(
            text_inputs, )
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1 )

        inputs['prompt_embeds'] = prompt_embeds

        # forward
        output = audioldm_pipe(**inputs )
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2 ).max() < 1E-2
    def test_audioldm_negative_prompt_embeds(self ):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components )
        audioldm_pipe = audioldm_pipe.to(torch_device )
        audioldm_pipe = audioldm_pipe.to(torch_device )
        audioldm_pipe.set_progress_bar_config(disable=None )

        inputs = self.get_dummy_inputs(torch_device )
        negative_prompt = 3 * ['this is a negative prompt']
        inputs['negative_prompt'] = negative_prompt
        inputs['prompt'] = 3 * [inputs['prompt']]

        # forward
        output = audioldm_pipe(**inputs )
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device )
        prompt = 3 * [inputs.pop('prompt' )]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p, padding='max_length', max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors='pt', )
            text_inputs = text_inputs['input_ids'].to(torch_device )

            text_embeds = audioldm_pipe.text_encoder(
                text_inputs, )
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1 )

            embeds.append(text_embeds )

        inputs['prompt_embeds'], inputs['negative_prompt_embeds'] = embeds

        # forward
        output = audioldm_pipe(**inputs )
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2 ).max() < 1E-2
def UpperCAmelCase_ (self ):
UpperCamelCase__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ = self.get_dummy_components()
UpperCamelCase__ = PNDMScheduler(skip_prk_steps=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = AudioLDMPipeline(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = audioldm_pipe.to(SCREAMING_SNAKE_CASE_ )
audioldm_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = """egg cracking"""
UpperCamelCase__ = audioldm_pipe(**SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = output.audios[0]
assert audio.ndim == 1
assert len(SCREAMING_SNAKE_CASE_ ) == 2_56
UpperCamelCase__ = audio[:10]
UpperCamelCase__ = np.array(
[-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def UpperCAmelCase_ (self ):
UpperCamelCase__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ = self.get_dummy_components()
UpperCamelCase__ = PNDMScheduler(skip_prk_steps=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = AudioLDMPipeline(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = audioldm_pipe.to(SCREAMING_SNAKE_CASE_ )
audioldm_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = """A hammer hitting a wooden surface"""
# test num_waveforms_per_prompt=1 (default)
UpperCamelCase__ = audioldm_pipe(SCREAMING_SNAKE_CASE_ , num_inference_steps=2 ).audios
assert audios.shape == (1, 2_56)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
UpperCamelCase__ = 2
UpperCamelCase__ = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 2_56)
# test num_waveforms_per_prompt for single prompt
UpperCamelCase__ = 2
UpperCamelCase__ = audioldm_pipe(SCREAMING_SNAKE_CASE_ , num_inference_steps=2 , num_waveforms_per_prompt=SCREAMING_SNAKE_CASE_ ).audios
assert audios.shape == (num_waveforms_per_prompt, 2_56)
# test num_waveforms_per_prompt for batch of prompts
UpperCamelCase__ = 2
UpperCamelCase__ = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=SCREAMING_SNAKE_CASE_ ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_56)
def UpperCAmelCase_ (self ):
UpperCamelCase__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ = self.get_dummy_components()
UpperCamelCase__ = AudioLDMPipeline(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = audioldm_pipe.to(SCREAMING_SNAKE_CASE_ )
audioldm_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = audioldm_pipe.vocoder.config.sampling_rate
UpperCamelCase__ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = audioldm_pipe(audio_length_in_s=0.016 , **SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = output.audios[0]
assert audio.ndim == 1
assert len(SCREAMING_SNAKE_CASE_ ) / vocoder_sampling_rate == 0.016
UpperCamelCase__ = audioldm_pipe(audio_length_in_s=0.032 , **SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = output.audios[0]
assert audio.ndim == 1
assert len(SCREAMING_SNAKE_CASE_ ) / vocoder_sampling_rate == 0.032
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.get_dummy_components()
UpperCamelCase__ = AudioLDMPipeline(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = audioldm_pipe.to(SCREAMING_SNAKE_CASE_ )
audioldm_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = ["""hey"""]
UpperCamelCase__ = audioldm_pipe(SCREAMING_SNAKE_CASE_ , num_inference_steps=1 )
UpperCamelCase__ = output.audios.shape
assert audio_shape == (1, 2_56)
UpperCamelCase__ = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
UpperCamelCase__ = SpeechTaHifiGan(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = audioldm_pipe(SCREAMING_SNAKE_CASE_ , num_inference_steps=1 )
UpperCamelCase__ = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 2_56)
def UpperCAmelCase_ (self ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
self._test_inference_batch_single_identical(test_mean_pixel_difference=SCREAMING_SNAKE_CASE_ )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def UpperCAmelCase_ (self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=SCREAMING_SNAKE_CASE_ )
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase ):
    """simple docstring"""
    def tearDown(self ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0 ):
        generator = torch.Generator(device=generator_device ).manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 8, 1_28, 16) )
        latents = torch.from_numpy(latents ).to(device=device, dtype=dtype )
        inputs = {
            'prompt': 'A hammer hitting a wooden surface',
            'latents': latents,
            'generator': generator,
            'num_inference_steps': 3,
            'guidance_scale': 2.5,
        }
        return inputs
    def test_audioldm(self ):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
        audioldm_pipe = audioldm_pipe.to(torch_device )
        audioldm_pipe.set_progress_bar_config(disable=None )

        inputs = self.get_inputs(torch_device )
        inputs['num_inference_steps'] = 25
        audio = audioldm_pipe(**inputs ).audios[0]

        assert audio.ndim == 1
        assert len(audio ) == 8_19_20

        audio_slice = audio[7_72_30:7_72_40]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
        max_diff = np.abs(expected_slice - audio_slice ).max()
        assert max_diff < 1E-2
    def test_audioldm_lms(self ):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
        audioldm_pipe = audioldm_pipe.to(torch_device )
        audioldm_pipe.set_progress_bar_config(disable=None )

        inputs = self.get_inputs(torch_device )
        audio = audioldm_pipe(**inputs ).audios[0]

        assert audio.ndim == 1
        assert len(audio ) == 8_19_20

        audio_slice = audio[2_77_80:2_77_90]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
        max_diff = np.abs(expected_slice - audio_slice ).max()
        assert max_diff < 3E-2
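# Minimal end-to-end sketch of the pipeline under test (illustrative; same
# "cvssp/audioldm" checkpoint as the slow tests above, ~16 kHz mono output):
#   pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
#   audio = pipe("A hammer hitting a wooden surface", num_inference_steps=10).audios[0]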
| 513 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
    def __init__( self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None, ) -> int:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs( self ) -> Dict:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size )

        config = self.get_config()

        return config, pixel_values, labels
    def get_config( self ) -> str:
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model( self, config, pixel_values, labels ) -> Optional[Any]:
        model = ViTMSNModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_image_classification( self, config, pixel_values, labels ) -> Tuple:
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values, labels=labels )
        print(f'Pixel and labels shape: {pixel_values.shape}, {labels.shape}' )
        print(f'Labels: {labels}' )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config )
        model.to(torch_device )
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )

    def prepare_config_and_inputs_for_common( self ) -> Optional[int]:
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
_a = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
_a = (
{"""feature-extraction""": ViTMSNModel, """image-classification""": ViTMSNForImageClassification}
if is_torch_available()
else {}
)
_a = False
_a = False
_a = False
_a = False
    def setUp( self ) -> List[Any]:
        self.model_tester = ViTMSNModelTester(self )
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37 )

    def test_config( self ) -> Any:
        self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMSN does not use inputs_embeds' )
    def test_inputs_embeds( self ) -> str:
pass
    def test_model_common_attributes( self ) -> Tuple:
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear ) )

    def test_forward_signature( self ) -> Any:
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names )
    def test_model( self ) -> Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_image_classification( self ) -> Any:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ) -> Optional[Any]:
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase ):
@cached_property
    def default_image_processor( self ) -> Dict:
return ViTImageProcessor.from_pretrained('facebook/vit-msn-small' ) if is_vision_available() else None
@slow
    def test_inference_image_classification_head( self ) -> Optional[int]:
        torch.manual_seed(2 )
        model = ViTMSNForImageClassification.from_pretrained('facebook/vit-msn-small' ).to(torch_device )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt' ).to(torch_device )

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )

        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape, expected_shape )

        expected_slice = torch.tensor([-0.0_803, -0.4_454, -0.2_375] ).to(torch_device )

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4 ) )
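# Illustrative inference sketch mirroring the integration test above:
#   processor = ViTImageProcessor.from_pretrained("facebook/vit-msn-small")
#   model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small")
#   inputs = processor(images=prepare_img(), return_tensors="pt")
#   predicted_class = model(**inputs).logits.argmax(-1)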
| 490 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4e00 and cp <= 0X9fff)
or (cp >= 0X3400 and cp <= 0X4dbf) #
or (cp >= 0X20000 and cp <= 0X2a6df) #
or (cp >= 0X2a700 and cp <= 0X2b73f) #
or (cp >= 0X2b740 and cp <= 0X2b81f) #
or (cp >= 0X2b820 and cp <= 0X2ceaf) #
or (cp >= 0Xf900 and cp <= 0Xfaff)
or (cp >= 0X2f800 and cp <= 0X2fa1f) #
): #
return True
return False
def is_chinese(word: str ):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char )
        if not _is_chinese_char(cp ):
            return 0
    return 1
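# e.g. is_chinese("身高") -> 1, while is_chinese("180") -> 0 (digits are not CJK chars)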
def get_chinese_word(tokens: List[str] ):
    word_set = set()

    for token in tokens:
        chinese_word = len(token ) > 1 and is_chinese(token )
        if chinese_word:
            word_set.add(token )
    word_list = list(word_set )
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set() ):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w ) for w in chinese_word_set] )

    bert_word = bert_tokens
    start, end = 0, len(bert_word )
    while start < end:
        single_word = True
        if is_chinese(bert_word[start] ):
            l = min(end - start, max_word_len )
            for i in range(l, 1, -1 ):
                whole_word = ''.join(bert_word[start : start + i] )
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i ):
                        bert_word[j] = '##' + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer ):
    ltp_res = []

    for i in range(0, len(lines ), 100 ):
        res = ltp_tokenizer.seg(lines[i : i + 100] )[0]
        res = [get_chinese_word(r ) for r in res]
        ltp_res.extend(res )
    assert len(ltp_res ) == len(lines )

    bert_res = []
    for i in range(0, len(lines ), 100 ):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512 )
        bert_res.extend(res['input_ids'] )
    assert len(bert_res ) == len(lines )

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res ):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id )
            input_tokens.append(token )
        input_tokens = add_sub_symbol(input_tokens, chinese_word )
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens ):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token ) == 1 and _is_chinese_char(ord(clean_token ) ):
                    ref_id.append(i )
        ref_ids.append(ref_id )

    assert len(ref_ids ) == len(bert_res )

    return ref_ids
def main(args ):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, 'r', encoding='utf-8' ) as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line ) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp )  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert )
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer )

    with open(args.save_path, 'w', encoding='utf-8' ) as f:
        data = [json.dumps(ref ) + '\n' for ref in ref_ids]
        f.writelines(data )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
)
parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")
    args = parser.parse_args()
main(args)
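# Example invocation (paths match the argparse defaults above):
#   python prepare_chinese_ref.py --file_name ./resources/chinese-demo.txt \
#       --ltp ./resources/ltp --bert ./resources/robert --save_path ./resources/ref.txt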
| 490 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object ):
    def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True, use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=12, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, summary_type="last", use_proj=None, scope=None, ) -> Optional[int]:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
    def prepare_config_and_inputs( self ) -> Optional[int]:
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        input_mask = random_attention_mask([self.batch_size, self.seq_length] )

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2 ) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs )

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
            is_impossible_labels = ids_tensor([self.batch_size], 2 ).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices )

        config = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def get_config( self : int ) -> Any:
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def UpperCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Any , ) -> Union[str, Any]:
lowerCamelCase_ = FlaubertModel(config=a_ )
model.to(a_ )
model.eval()
lowerCamelCase_ = model(a_ , lengths=a_ , langs=a_ )
lowerCamelCase_ = model(a_ , langs=a_ )
lowerCamelCase_ = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : str , ) -> Optional[Any]:
lowerCamelCase_ = FlaubertWithLMHeadModel(a_ )
model.to(a_ )
model.eval()
lowerCamelCase_ = model(a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] , ) -> Optional[int]:
lowerCamelCase_ = FlaubertForQuestionAnsweringSimple(a_ )
model.to(a_ )
model.eval()
lowerCamelCase_ = model(a_ )
lowerCamelCase_ = model(a_ , start_positions=a_ , end_positions=a_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : int , ) -> Tuple:
lowerCamelCase_ = FlaubertForQuestionAnswering(a_ )
model.to(a_ )
model.eval()
lowerCamelCase_ = model(a_ )
        result_with_labels = model(
            a_ , start_positions=a_ , end_positions=a_ , cls_index=a_ , is_impossible=a_ , p_mask=a_ , )

        result_with_labels = model(
            a_ , start_positions=a_ , end_positions=a_ , cls_index=a_ , is_impossible=a_ , )

        (total_loss,) = result_with_labels.to_tuple()

        result_with_labels = model(a_ , start_positions=a_ , end_positions=a_ )

        (total_loss,) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def UpperCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int , ) -> Union[str, Any]:
lowerCamelCase_ = FlaubertForSequenceClassification(a_ )
model.to(a_ )
model.eval()
lowerCamelCase_ = model(a_ )
lowerCamelCase_ = model(a_ , labels=a_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[str] , ) -> Tuple:
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = FlaubertForTokenClassification(a_ )
model.to(a_ )
model.eval()
lowerCamelCase_ = model(a_ , attention_mask=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any , ) -> Dict:
lowerCamelCase_ = self.num_choices
lowerCamelCase_ = FlaubertForMultipleChoice(config=a_ )
model.to(a_ )
model.eval()
lowerCamelCase_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase_ = model(
a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ) -> Optional[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'lengths': input_lengths,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
SCREAMING_SNAKE_CASE : Optional[int] = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE : List[Any] = (
{
"""feature-extraction""": FlaubertModel,
"""fill-mask""": FlaubertWithLMHeadModel,
"""question-answering""": FlaubertForQuestionAnsweringSimple,
"""text-classification""": FlaubertForSequenceClassification,
"""token-classification""": FlaubertForTokenClassification,
"""zero-shot""": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> int:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[str]=False ) -> Dict:
lowerCamelCase_ = super()._prepare_for_class(a_ , a_ , return_labels=a_ )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
lowerCamelCase_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
lowerCamelCase_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
return inputs_dict
    def setUp( self ) -> List[Any]:
        self.model_tester = FlaubertModelTester(self )
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37 )
def UpperCamelCase ( self : Dict ) -> Any:
self.config_tester.run_common_tests()
def UpperCamelCase ( self : Tuple ) -> Optional[Any]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*a_ )
def UpperCamelCase ( self : Optional[Any] ) -> Dict:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*a_ )
def UpperCamelCase ( self : Union[str, Any] ) -> int:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*a_ )
def UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*a_ )
def UpperCamelCase ( self : List[Any] ) -> int:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*a_ )
def UpperCamelCase ( self : Dict ) -> Tuple:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*a_ )
def UpperCamelCase ( self : Tuple ) -> str:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*a_ )
@slow
def UpperCamelCase ( self : int ) -> List[Any]:
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = FlaubertModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
@slow
@require_torch_gpu
def UpperCamelCase ( self : str ) -> Optional[int]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
lowerCamelCase_ = True
lowerCamelCase_ = model_class(config=a_ )
lowerCamelCase_ = self._prepare_for_class(a_ , a_ )
lowerCamelCase_ = torch.jit.trace(
a_ , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a_ , os.path.join(a_ , 'traced_model.pt' ) )
lowerCamelCase_ = torch.jit.load(os.path.join(a_ , 'traced_model.pt' ) , map_location=a_ )
loaded(inputs_dict['input_ids'].to(a_ ) , inputs_dict['attention_mask'].to(a_ ) )
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase ):
@slow
    def test_inference_no_head_absolute_embedding( self ) -> str:
        model = FlaubertModel.from_pretrained('flaubert/flaubert_base_cased' )
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape, expected_shape )
        expected_slice = torch.tensor(
            [[[-2.6_251, -1.4_298, -0.0_227], [-2.8_510, -1.6_387, 0.2_258], [-2.8_114, -1.1_832, -0.3_066]]] )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4 ) )
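# Quick usage sketch matching the integration test above (illustrative):
#   model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
#   hidden = model(torch.tensor([[0, 345, 232, 328]]))[0]  # last hidden states, shape (1, 4, 768)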
| 549 |
import os
import pytest
from attr import dataclass
os.environ["AWS_DEFAULT_REGION"] = "us-east-1"  # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
    hyperparameters = {
        'task_name': 'mnli',
        'per_device_train_batch_size': 16,
        'per_device_eval_batch_size': 16,
        'do_train': True,
        'do_eval': True,
        'do_predict': True,
        'output_dir': '/opt/ml/model',
        'overwrite_output_dir': True,
        'max_steps': 500,
        'save_steps': 5_500,
    }
    distributed_hyperparameters = {**hyperparameters, 'max_steps': 1_000}
@property
def __lowercase( self : List[str] )-> str:
"""simple docstring"""
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def __lowercase( self : Union[str, Any] )-> str:
"""simple docstring"""
return F'''{self.framework}-transfromers-test'''
@property
def __lowercase( self : int )-> str:
"""simple docstring"""
return F'''./tests/sagemaker/scripts/{self.framework}'''
@property
def __lowercase( self : Tuple )-> str:
"""simple docstring"""
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='class' )
def _a ( lowercase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = SageMakerTestEnvironment(framework=request.cls.framework )
| 85 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : List[str] = logging.get_logger(__name__)
lowercase : Dict = {
'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class __A( a__ ):
__A = """sew-d"""
    def __init__( self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, squeeze_factor=2, max_position_embeddings=512, position_buckets=256, share_att_key=True, relative_attention=True, pos_att_type=("p2c", "c2p"), norm_rel_ebd="layer_norm", hidden_act="gelu_python", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, initializer_range=0.02, layer_norm_eps=1E-7, feature_layer_norm_eps=1E-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512), conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, pad_token_id=0, bos_token_id=1, eos_token_id=2, **kwargs, ):
        """simple docstring"""
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type )
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect.'''
'''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
@property
    def inputs_to_logits_ratio( self ):
        """simple docstring"""
        return functools.reduce(operator.mul, self.conv_stride, 1 )
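# Illustrative sketch: with the defaults above, the convolutional feature
# extractor downsamples the waveform by prod(conv_stride) = 320 frames per logit.
if __name__ == "__main__":
    config = SEWDConfig()
    assert config.inputs_to_logits_ratio == 320
    assert config.num_feat_extract_layers == len(config.conv_kernel)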
| 703 |
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class __A( unittest.TestCase ):
    def setUp( self ):
        """simple docstring"""
        vocab = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
        vocab_tokens = dict(zip(vocab, range(len(vocab ) ) ) )
        self.add_kwargs_tokens_map = {
            '''unk_token''': '''<unk>''',
            '''bos_token''': '''<s>''',
            '''eos_token''': '''</s>''',
        }
        feature_extractor_map = {
            '''feature_size''': 1,
            '''padding_value''': 0.0,
            '''sampling_rate''': 1_6000,
            '''return_attention_mask''': False,
            '''do_normalize''': True,
        }
        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME )
        with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.feature_extraction_file, '''w''', encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(feature_extractor_map ) + '''\n''' )
        # load decoder from hub
        self.decoder_name = '''hf-internal-testing/ngram-beam-search-decoder'''
    def get_tokenizer( self, **kwargs_init ):
        """simple docstring"""
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init )
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs )
    def get_feature_extractor( self, **kwargs ):
        """simple docstring"""
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs )
    def get_decoder( self, **kwargs ):
        """simple docstring"""
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs )
    def tearDown( self ):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname )
def _UpperCamelCase ( self ):
"""simple docstring"""
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = self.get_feature_extractor()
_UpperCamelCase = self.get_decoder()
_UpperCamelCase = WavaVecaProcessorWithLM(tokenizer=A, feature_extractor=A, decoder=A )
processor.save_pretrained(self.tmpdirname )
_UpperCamelCase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer, A )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor, A )
# decoder
self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set, decoder.model_container[decoder._model_key]._unigram_set, )
self.assertIsInstance(processor.decoder, A )
def _UpperCamelCase ( self ):
"""simple docstring"""
_UpperCamelCase = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
_UpperCamelCase = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha, 5.0 )
self.assertEqual(processor.language_model.beta, 3.0 )
self.assertEqual(processor.language_model.score_boundary, -7.0 )
self.assertEqual(processor.language_model.unk_score_offset, 3 )
def _UpperCamelCase ( self ):
"""simple docstring"""
_UpperCamelCase = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(A, '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=A, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder() )
def _UpperCamelCase ( self ):
"""simple docstring"""
_UpperCamelCase = self.get_feature_extractor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = self.get_decoder()
_UpperCamelCase = WavaVecaProcessorWithLM(tokenizer=A, feature_extractor=A, decoder=A )
_UpperCamelCase = floats_list((3, 1000) )
_UpperCamelCase = feature_extractor(A, return_tensors='''np''' )
_UpperCamelCase = processor(A, return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2 )
def _UpperCamelCase ( self ):
"""simple docstring"""
_UpperCamelCase = self.get_feature_extractor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = self.get_decoder()
_UpperCamelCase = WavaVecaProcessorWithLM(tokenizer=A, feature_extractor=A, decoder=A )
_UpperCamelCase = '''This is a test string'''
_UpperCamelCase = processor(text=A )
_UpperCamelCase = tokenizer(A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
    def _get_dummy_logits( self, shape=(2, 10, 16), seed=77 ):
        """simple docstring"""
        np.random.seed(seed )
        return np.random.rand(*shape )
def _UpperCamelCase ( self ):
"""simple docstring"""
_UpperCamelCase = self.get_feature_extractor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = self.get_decoder()
_UpperCamelCase = WavaVecaProcessorWithLM(tokenizer=A, feature_extractor=A, decoder=A )
_UpperCamelCase = self._get_dummy_logits(shape=(10, 16), seed=13 )
_UpperCamelCase = processor.decode(A )
_UpperCamelCase = decoder.decode_beams(A )[0]
self.assertEqual(decoded_decoder[0], decoded_processor.text )
self.assertEqual('''</s> <s> </s>''', decoded_processor.text )
self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def _UpperCamelCase ( self, A ):
"""simple docstring"""
_UpperCamelCase = self.get_feature_extractor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = self.get_decoder()
_UpperCamelCase = WavaVecaProcessorWithLM(tokenizer=A, feature_extractor=A, decoder=A )
_UpperCamelCase = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
_UpperCamelCase = processor.batch_decode(A )
else:
with get_context(A ).Pool() as pool:
_UpperCamelCase = processor.batch_decode(A, A )
_UpperCamelCase = list(A )
with get_context('''fork''' ).Pool() as p:
_UpperCamelCase = decoder.decode_beams_batch(A, A )
        texts_decoder , logit_scores_decoder , lm_scores_decoder = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(A, decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''], decoded_processor.text )
self.assertListEqual(A, decoded_processor.logit_score )
self.assertListEqual(A, decoded_processor.lm_score )
def _UpperCamelCase ( self ):
"""simple docstring"""
_UpperCamelCase = self.get_feature_extractor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = self.get_decoder()
_UpperCamelCase = WavaVecaProcessorWithLM(tokenizer=A, feature_extractor=A, decoder=A )
_UpperCamelCase = self._get_dummy_logits()
_UpperCamelCase = 15
_UpperCamelCase = -20.0
_UpperCamelCase = -4.0
_UpperCamelCase = processor.batch_decode(
A, beam_width=A, beam_prune_logp=A, token_min_logp=A, )
_UpperCamelCase = decoded_processor_out.text
_UpperCamelCase = list(A )
with get_context('''fork''' ).Pool() as pool:
_UpperCamelCase = decoder.decode_beams_batch(
A, A, beam_width=A, beam_prune_logp=A, token_min_logp=A, )
_UpperCamelCase = [d[0][0] for d in decoded_decoder_out]
_UpperCamelCase = [d[0][2] for d in decoded_decoder_out]
_UpperCamelCase = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(A, A )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''], A )
self.assertTrue(np.array_equal(A, decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447], A, atol=1E-3 ) )
self.assertTrue(np.array_equal(A, decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9_474], A, atol=1E-3 ) )
def _UpperCamelCase ( self ):
"""simple docstring"""
_UpperCamelCase = self.get_feature_extractor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = self.get_decoder()
_UpperCamelCase = WavaVecaProcessorWithLM(tokenizer=A, feature_extractor=A, decoder=A )
_UpperCamelCase = self._get_dummy_logits()
_UpperCamelCase = 2.0
_UpperCamelCase = 5.0
_UpperCamelCase = -20.0
_UpperCamelCase = True
_UpperCamelCase = processor.batch_decode(
A, alpha=A, beta=A, unk_score_offset=A, lm_score_boundary=A, )
_UpperCamelCase = decoded_processor_out.text
_UpperCamelCase = list(A )
decoder.reset_params(
alpha=A, beta=A, unk_score_offset=A, lm_score_boundary=A, )
with get_context('''fork''' ).Pool() as pool:
_UpperCamelCase = decoder.decode_beams_batch(
A, A, )
_UpperCamelCase = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(A, A )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''], A )
_UpperCamelCase = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha, 2.0 )
self.assertEqual(lm_model.beta, 5.0 )
self.assertEqual(lm_model.unk_score_offset, -20.0 )
self.assertEqual(lm_model.score_boundary, A )
def _UpperCamelCase ( self ):
"""simple docstring"""
_UpperCamelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_UpperCamelCase = processor.decoder.model_container[processor.decoder._model_key]
_UpperCamelCase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
_UpperCamelCase = os.listdir(A )
_UpperCamelCase = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(A, A )
def _UpperCamelCase ( self ):
"""simple docstring"""
_UpperCamelCase = snapshot_download('''hf-internal-testing/processor_with_lm''' )
_UpperCamelCase = WavaVecaProcessorWithLM.from_pretrained(A )
_UpperCamelCase = processor.decoder.model_container[processor.decoder._model_key]
_UpperCamelCase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
_UpperCamelCase = os.listdir(A )
_UpperCamelCase = os.listdir(A )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(A, A )
def _UpperCamelCase ( self ):
"""simple docstring"""
_UpperCamelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_UpperCamelCase = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_UpperCamelCase = floats_list((3, 1000) )
_UpperCamelCase = processor_wavaveca(A, return_tensors='''np''' )
_UpperCamelCase = processor_auto(A, return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum(), input_auto[key].sum(), delta=1E-2 )
_UpperCamelCase = self._get_dummy_logits()
_UpperCamelCase = processor_wavaveca.batch_decode(A )
_UpperCamelCase = processor_auto.batch_decode(A )
self.assertListEqual(decoded_wavaveca.text, decoded_auto.text )
def _UpperCamelCase ( self ):
"""simple docstring"""
_UpperCamelCase = self.get_feature_extractor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = self.get_decoder()
_UpperCamelCase = WavaVecaProcessorWithLM(tokenizer=A, feature_extractor=A, decoder=A )
self.assertListEqual(
processor.model_input_names, feature_extractor.model_input_names, msg='''`processor` and `feature_extractor` model input names do not match''', )
@staticmethod
    def get_from_offsets( offsets, key ):
        """simple docstring"""
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def _UpperCamelCase ( self ):
"""simple docstring"""
_UpperCamelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_UpperCamelCase = self._get_dummy_logits()[0]
_UpperCamelCase = processor.decode(A, output_word_offsets=A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ), 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(A, A ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''], '''word''' ) ), outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''], '''word''' ), ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''], '''start_offset''' ), [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''], '''end_offset''' ), [1, 3, 5] )
def _UpperCamelCase ( self ):
"""simple docstring"""
_UpperCamelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_UpperCamelCase = self._get_dummy_logits()
_UpperCamelCase = processor.batch_decode(A, output_word_offsets=A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ), 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(A, A ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(A, '''word''' ) ) for o in outputs['''word_offsets''']], outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0], '''word''' ), ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0], '''start_offset''' ), [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0], '''end_offset''' ), [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def _UpperCamelCase ( self ):
"""simple docstring"""
import torch
_UpperCamelCase = load_dataset('''common_voice''', '''en''', split='''train''', streaming=A )
_UpperCamelCase = ds.cast_column('''audio''', datasets.Audio(sampling_rate=1_6000 ) )
_UpperCamelCase = iter(A )
_UpperCamelCase = next(A )
_UpperCamelCase = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
_UpperCamelCase = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
_UpperCamelCase = processor(sample['''audio''']['''array'''], return_tensors='''pt''' ).input_values
with torch.no_grad():
_UpperCamelCase = model(A ).logits.cpu().numpy()
_UpperCamelCase = processor.decode(logits[0], output_word_offsets=A )
_UpperCamelCase = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
_UpperCamelCase = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
_UpperCamelCase = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(A, '''word''' ) ), A )
self.assertEqual(''' '''.join(self.get_from_offsets(A, '''word''' ) ), output.text )
# output times
_UpperCamelCase = torch.tensor(self.get_from_offsets(A, '''start_time''' ) )
_UpperCamelCase = torch.tensor(self.get_from_offsets(A, '''end_time''' ) )
# fmt: off
_UpperCamelCase = torch.tensor([1.4_199, 1.6_599, 2.2_599, 3.0, 3.24, 3.5_999, 3.7_999, 4.0_999, 4.26, 4.94, 5.28, 5.6_599, 5.78, 5.94, 6.32, 6.5_399, 6.6_599] )
_UpperCamelCase = torch.tensor([1.5_399, 1.8_999, 2.9, 3.16, 3.5_399, 3.72, 4.0_199, 4.1_799, 4.76, 5.1_599, 5.5_599, 5.6_999, 5.86, 6.1_999, 6.38, 6.6_199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(A, A, atol=0.01 ) )
self.assertTrue(torch.allclose(A, A, atol=0.01 ) )
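# Illustrative sketch (not a test): create the multiprocessing pool *after* the
# processor so worker processes inherit the decoder's LM state; "fork" assumes
# a POSIX platform. Repo id taken from the tests above; requires a download.
if __name__ == "__main__":
    from transformers import Wav2Vec2ProcessorWithLM

    processor = Wav2Vec2ProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm')
    dummy_logits = np.random.rand(2, 10, len(processor.tokenizer))  # (batch, time, vocab)
    with get_context('fork').Pool() as pool:
        decoded = processor.batch_decode(dummy_logits, pool=pool)
    print(decoded.text)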
| 105 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar('T')
class SegmentTree (Generic[T] ):
    '''simple docstring'''
    def __init__( self , arr : list[T] , fnc : Callable[[T, T], T] ) -> None:
        any_type: Any | None = None
        self.N = len(arr )
        # heap-style array: internal nodes in st[1:N], leaves in st[N:]
        self.st: list[T] = [any_type for _ in range(self.N )] + arr
        self.fn = fnc
        self.build()
    def build( self ) -> None:
        for p in range(self.N - 1 , 0 , -1 ):
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
    def update( self , p : int , v : T ) -> None:
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
    def query( self , l : int , r : int ) -> T | None:  # noqa: E741
        l , r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res , self.st[l] )
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res , self.st[r] )
            l , r = (l + 1) // 2, (r - 1) // 2
        return res
if __name__ == "__main__":
from functools import reduce
    test_array = [1, 1_0, -2, 9, -3, 8, 4, -7, 5, 6, 1_1, -1_2]
    test_updates = {
0: 7,
1: 2,
2: 6,
3: -1_4,
4: 5,
5: 4,
6: 7,
7: -1_0,
8: 9,
9: 1_0,
1_0: 1_2,
1_1: 1,
}
    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
    def test_all_segments() -> None:
        for i in range(len(test_array ) ):
            for j in range(i , len(test_array ) ):
                min_range = reduce(min , test_array[i : j + 1] )
                max_range = reduce(max , test_array[i : j + 1] )
                sum_range = reduce(lambda a , b : a + b , test_array[i : j + 1] )
                assert min_range == min_segment_tree.query(i , j )
                assert max_range == max_segment_tree.query(i , j )
                assert sum_range == sum_segment_tree.query(i , j )
test_all_segments()
for index, value in test_updates.items():
        test_array[index] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
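if __name__ == "__main__":
    # Minimal standalone usage sketch of SegmentTree (illustrative):
    st = SegmentTree([5, 2, 8], min)
    assert st.query(0, 2) == 2  # min over the whole array
    st.update(1, 9)             # point update in O(log n)
    assert st.query(0, 2) == 5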
| 159 |
"""simple docstring"""
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = 'http://www.mocksite.com/file1.txt'
CONTENT = '"text": ["foo", "foo"]'
HASH = '6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8'
class MockResponse:
    '''simple docstring'''
    status_code = 2_0_0
    headers = {"Content-Length": "100"}
    cookies = {}
    def iter_content( self , **kwargs ) -> Any:
        return [bytes(CONTENT , "utf-8" )]
def mock_request( *args , **kwargs ) -> Any:
    return MockResponse()
@pytest.mark.parametrize("urls_type" , [str, list, dict] )
def test_download_manager_download( urls_type , tmp_path , monkeypatch ):
    import requests
    monkeypatch.setattr(requests , "request" , mock_request )
    url = URL
    if issubclass(urls_type , str ):
        urls = url
    elif issubclass(urls_type , list ):
        urls = [url]
    elif issubclass(urls_type , dict ):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root , cache_subdir ) , use_etag=False , )
    dl_manager = DownloadManager(dataset_name=dataset_name , download_config=download_config )
    downloaded_paths = dl_manager.download(urls )
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls , str ):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls , dict ):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths , input_urls ):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path )
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json" )
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text() )
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("paths_type" , [str, list, dict] )
def test_download_manager_extract( paths_type , xz_file , text_file ):
    filename = str(xz_file )
    if issubclass(paths_type , str ):
        paths = filename
    elif issubclass(paths_type , list ):
        paths = [filename]
    elif issubclass(paths_type , dict ):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir , use_etag=False , )
    dl_manager = DownloadManager(dataset_name=dataset_name , download_config=download_config )
    extracted_paths = dl_manager.extract(paths )
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths , str ):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths , dict ):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths , input_paths ):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path )
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path , etag=None )
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content
def _test_jsonl( path , file ):
    assert path.endswith(".jsonl" )
    for num_items, line in enumerate(file , start=1 ):
        item = json.loads(line.decode("utf-8" ) )
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4
@pytest.mark.parametrize("archive_jsonl" , ["tar_jsonl_path", "zip_jsonl_path"] )
def test_iter_archive_path( archive_jsonl , request ):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl )
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path ) , start=1 ):
        _test_jsonl(path , file )
    assert num_jsonl == 2
@pytest.mark.parametrize("archive_nested_jsonl" , ["tar_nested_jsonl_path", "zip_nested_jsonl_path"] )
def test_iter_archive_file( archive_nested_jsonl , request ):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl )
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path ) , start=1 ):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file ) , start=1 ):
            _test_jsonl(subpath , subfile )
    assert num_tar == 1
    assert num_jsonl == 2
def test_iter_files( data_dir ):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir ) , start=1 ):
        assert os.path.basename(file ) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
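# Illustrative sketch of iter_archive outside pytest: it streams
# (inner_path, file_obj) pairs from a tarball without extracting to disk.
# The archive path below is hypothetical.
if __name__ == "__main__":
    dl_manager = DownloadManager()
    for inner_path, file_obj in dl_manager.iter_archive("data.jsonl.tar"):
        print(inner_path, file_obj.readline())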
| 159 | 1 |
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
SAMPLE_BPE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
FRAMEWORK = 'pt' if is_torch_available() else 'tf'
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest ( TokenizerTesterMixin ,unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self):
        """simple docstring"""
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id( self):
        """simple docstring"""
        token = '''<pad>'''
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token) , token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id) , token)
    def test_get_vocab( self):
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0] , '''<s>NOTUSED''')
        self.assertEqual(vocab_keys[1] , '''<pad>''')
        self.assertEqual(vocab_keys[-1] , '''<mask>''')
        self.assertEqual(len(vocab_keys) , 1004)
    def test_vocab_size( self):
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size , 1005)
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = CamembertTokenizer(a__)
tokenizer.save_pretrained(self.tmpdirname)
_lowerCamelCase : Optional[Any] = CamembertTokenizerFast.from_pretrained(self.tmpdirname)
_lowerCamelCase : Optional[int] = '''I was born in 92000, and this is falsé.'''
_lowerCamelCase : Optional[int] = tokenizer.encode(a__)
_lowerCamelCase : List[Any] = rust_tokenizer.encode(a__)
self.assertListEqual(a__ , a__)
_lowerCamelCase : Optional[Any] = tokenizer.encode(a__ , add_special_tokens=a__)
_lowerCamelCase : Optional[Any] = rust_tokenizer.encode(a__ , add_special_tokens=a__)
self.assertListEqual(a__ , a__)
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
_lowerCamelCase : Tuple = tokenizer.convert_ids_to_tokens(a__)
_lowerCamelCase : List[str] = rust_tokenizer.tokenize(a__)
self.assertListEqual(a__ , a__)
def __snake_case ( self):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
_lowerCamelCase : List[str] = self.get_tokenizer()
_lowerCamelCase : int = self.get_rust_tokenizer()
_lowerCamelCase : Tuple = '''I was born in 92000, and this is falsé.'''
_lowerCamelCase : List[Any] = tokenizer.tokenize(a__)
_lowerCamelCase : Optional[Any] = rust_tokenizer.tokenize(a__)
self.assertListEqual(a__ , a__)
_lowerCamelCase : Dict = tokenizer.encode(a__ , add_special_tokens=a__)
_lowerCamelCase : List[str] = rust_tokenizer.encode(a__ , add_special_tokens=a__)
self.assertListEqual(a__ , a__)
_lowerCamelCase : Tuple = self.get_rust_tokenizer()
_lowerCamelCase : Union[str, Any] = tokenizer.encode(a__)
_lowerCamelCase : Optional[Any] = rust_tokenizer.encode(a__)
self.assertListEqual(a__ , a__)
@slow
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Any = {'''input_ids''': [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 2_7575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 2_2804, 1_8818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 1_0326, 24, 2267, 20, 416, 5072, 1_5612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
_lowerCamelCase : str = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=a__ , model_name='''camembert-base''' , revision='''3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf''' , sequences=a__ , )
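# Illustrative parity check between the slow and fast tokenizers, mirroring the
# tests above; downloads the public `camembert-base` checkpoint.
if __name__ == "__main__":
    slow_tok = CamembertTokenizer.from_pretrained('camembert-base')
    fast_tok = CamembertTokenizerFast.from_pretrained('camembert-base')
    sample = 'I was born in 92000, and this is falsé.'
    assert slow_tok.encode(sample) == fast_tok.encode(sample)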
| 613 |
def counting_sort( collection ):
    # if the collection is empty, returns empty
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection )
    coll_max = max(collection )
    coll_min = min(collection )
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how often each number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i there are in the collection
    for i in range(1 , counting_arr_length ):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0 , coll_len ) ):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered
def counting_sort_string( string ):
    return "".join([chr(i ) for i in counting_sort([ord(c ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string('thisisthestring') == "eghhiiinrsssttt"
_lowerCamelCase = input('Enter numbers separated by a comma:\n').strip()
_lowerCamelCase = [int(item) for item in user_input.split(',')]
print(counting_sort(unsorted))
| 613 | 1 |
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime , None)
        if factor:
            # `prime` is composite: slide its factor forward to the next free multiple.
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            # `prime` really is prime: its square is the first composite to mark.
            factor_map[prime * prime] = prime
            yield prime
        prime += 1
def solution(limit: float = 1E10) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2
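# Quick sanity check of the incremental sieve (illustrative).
from itertools import islice

assert list(islice(sieve(), 8)) == [2, 3, 5, 7, 11, 13, 17, 19]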
if __name__ == "__main__":
print(solution())
| 401 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest ( BertTokenizationTest ):
    """simple docstring"""
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True
    @slow
    def test_sequence_builders( self ):
        tokenizer = DistilBertTokenizer.from_pretrained("""distilbert-base-uncased""" )
        text = tokenizer.encode("""sequence builders""" , add_special_tokens=False )
        text_a = tokenizer.encode("""multi-sequence build""" , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
            tokenizer.sep_token_id
        ]
| 401 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""SCUT-DLVCLab/lilt-roberta-en-base""": (
"""https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"""
),
}
class LiltConfig ( PretrainedConfig ):
    model_type = 'lilt'
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , classifier_dropout=None , channel_shrink_ratio=4 , max_2d_position_embeddings=1024 , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
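# Illustrative round trip through the PretrainedConfig serialization API.
if __name__ == "__main__":
    config = LiltConfig(channel_shrink_ratio=2)
    assert config.model_type == 'lilt'
    assert config.to_dict()['channel_shrink_ratio'] == 2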
| 721 |
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    """simple docstring"""
    parser = HfArgumentParser(TensorFlowBenchmarkArguments )
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = """Arg --no_{0} is no longer used, please use --no-{0} instead."""
        begin_error_msg = """ """.join(str(e ).split(""" """ )[:-1] )
        full_error_msg = """"""
        depreciated_args = eval(str(e ).split(""" """ )[-1] )
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:] )
            else:
                wrong_args.append(arg )
        if len(wrong_args ) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args )
        raise ValueError(full_error_msg )
    benchmark = TensorFlowBenchmark(args=benchmark_args )
    benchmark.run()
if __name__ == "__main__":
main()
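# Illustrative programmatic alternative to the CLI entry point above: build the
# arguments directly instead of parsing sys.argv (field names are standard
# TensorFlowBenchmarkArguments fields).
def run_programmatic_benchmark():
    benchmark_args = TensorFlowBenchmarkArguments(
        models=['bert-base-uncased'], batch_sizes=[8], sequence_lengths=[128]
    )
    TensorFlowBenchmark(args=benchmark_args).run()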
| 483 | 0 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"Visual-Attention-Network/van-base": (
"https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
),
}
class VanConfig( PretrainedConfig ):
    model_type = "van"
    def __init__(self , image_size=2_2_4 , num_channels=3 , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , hidden_sizes=[6_4, 1_2_8, 3_2_0, 5_1_2] , depths=[3, 3, 1_2, 3] , mlp_ratios=[8, 8, 4, 4] , hidden_act="gelu" , initializer_range=0.0_2 , layer_norm_eps=1E-6 , layer_scale_init_value=1E-2 , drop_path_rate=0.0 , dropout_rate=0.0 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
| 574 |
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests (ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    """simple docstring"""
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2
@property
def __snake_case ( self : Dict ) -> Optional[Any]:
__snake_case : Optional[Any] = 4
__snake_case : Tuple = 3
__snake_case : List[str] = (32, 32)
__snake_case : str = floats_tensor((batch_size, num_channels) + sizes ).to(lowerCamelCase )
return {"sample": image}
@property
def __snake_case ( self : Union[str, Any] ) -> Tuple:
return (3, 32, 32)
@property
def __snake_case ( self : int ) -> int:
return (3, 32, 32)
def __snake_case ( self : Optional[Any] ) -> Dict:
__snake_case : Optional[Any] = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 4,
}
__snake_case : Any = self.dummy_input
return init_dict, inputs_dict
def __snake_case ( self : str ) -> Dict:
pass
def __snake_case ( self : Tuple ) -> List[str]:
pass
@unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" )
def __snake_case ( self : Any ) -> Optional[Any]:
# enable deterministic behavior for gradient checkpointing
__snake_case , __snake_case : int = self.prepare_init_args_and_inputs_for_common()
__snake_case : str = self.model_class(**lowerCamelCase )
model.to(lowerCamelCase )
assert not model.is_gradient_checkpointing and model.training
__snake_case : str = model(**lowerCamelCase ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
__snake_case : Any = torch.randn_like(lowerCamelCase )
__snake_case : str = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
__snake_case : Optional[int] = self.model_class(**lowerCamelCase )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(lowerCamelCase )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
__snake_case : int = model_a(**lowerCamelCase ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
__snake_case : Union[str, Any] = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1E-5 )
__snake_case : Optional[int] = dict(model.named_parameters() )
__snake_case : List[Any] = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
def __snake_case ( self : List[Any] ) -> Optional[int]:
__snake_case , __snake_case : Optional[Any] = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(lowerCamelCase )
__snake_case : Optional[Any] = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def __snake_case ( self : Optional[Any] ) -> Union[str, Any]:
__snake_case : Tuple = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" )
__snake_case : Dict = model.to(lowerCamelCase )
model.eval()
if torch_device == "mps":
__snake_case : int = torch.manual_seed(0 )
else:
__snake_case : str = torch.Generator(device=lowerCamelCase ).manual_seed(0 )
__snake_case : List[str] = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
__snake_case : Union[str, Any] = image.to(lowerCamelCase )
with torch.no_grad():
__snake_case : str = model(lowerCamelCase , sample_posterior=lowerCamelCase , generator=lowerCamelCase ).sample
__snake_case : List[Any] = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
__snake_case : Union[str, Any] = torch.tensor(
[
-4.0078E-01,
-3.8323E-04,
-1.2681E-01,
-1.1462E-01,
2.0095E-01,
1.0893E-01,
-8.8247E-02,
-3.0361E-01,
-9.8644E-03,
] )
elif torch_device == "cpu":
__snake_case : Tuple = torch.tensor(
[-0.13_52, 0.08_78, 0.04_19, -0.08_18, -0.10_69, 0.06_88, -0.14_58, -0.44_46, -0.00_26] )
else:
__snake_case : List[str] = torch.tensor(
[-0.24_21, 0.46_42, 0.25_07, -0.04_38, 0.06_82, 0.31_60, -0.20_18, -0.07_27, 0.24_85] )
self.assertTrue(torch_all_close(lowerCamelCase , lowerCamelCase , rtol=1E-2 ) )
@slow
class AutoencoderKLIntegrationTests (unittest.TestCase ):
"""simple docstring"""
def __snake_case ( self : int , lowerCamelCase : Dict , lowerCamelCase : Optional[Any] ) -> List[str]:
return F'gaussian_noise_s={seed}_shape={"_".join([str(lowerCamelCase ) for s in shape] )}.npy'
def __snake_case ( self : List[Any] ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case ( self : Tuple , lowerCamelCase : List[Any]=0 , lowerCamelCase : Tuple=(4, 3, 512, 512) , lowerCamelCase : Optional[int]=False ) -> str:
__snake_case : List[Any] = torch.floataa if fpaa else torch.floataa
__snake_case : Tuple = torch.from_numpy(load_hf_numpy(self.get_file_format(lowerCamelCase , lowerCamelCase ) ) ).to(lowerCamelCase ).to(lowerCamelCase )
return image
def __snake_case ( self : Optional[Any] , lowerCamelCase : int="CompVis/stable-diffusion-v1-4" , lowerCamelCase : int=False ) -> int:
__snake_case : str = "fp16" if fpaa else None
__snake_case : int = torch.floataa if fpaa else torch.floataa
__snake_case : int = AutoencoderKL.from_pretrained(
lowerCamelCase , subfolder="vae" , torch_dtype=lowerCamelCase , revision=lowerCamelCase , )
model.to(lowerCamelCase ).eval()
return model
def __snake_case ( self : str , lowerCamelCase : int=0 ) -> Optional[Any]:
if torch_device == "mps":
return torch.manual_seed(lowerCamelCase )
return torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
@parameterized.expand(
[
# fmt: off
[33, [-0.16_03, 0.98_78, -0.04_95, -0.07_90, -0.27_09, 0.83_75, -0.20_60, -0.08_24], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]],
[47, [-0.23_76, 0.11_68, 0.13_32, -0.48_40, -0.25_08, -0.07_91, -0.04_93, -0.40_89], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]],
# fmt: on
] )
def __snake_case ( self : List[str] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any] ) -> List[Any]:
__snake_case : Optional[Any] = self.get_sd_vae_model()
__snake_case : List[Any] = self.get_sd_image(lowerCamelCase )
__snake_case : Tuple = self.get_generator(lowerCamelCase )
with torch.no_grad():
__snake_case : Optional[Any] = model(lowerCamelCase , generator=lowerCamelCase , sample_posterior=lowerCamelCase ).sample
assert sample.shape == image.shape
__snake_case : List[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
__snake_case : int = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(lowerCamelCase , lowerCamelCase , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.05_13, 0.02_89, 1.37_99, 0.21_66, -0.25_73, -0.08_71, 0.51_03, -0.09_99]],
[47, [-0.41_28, -0.13_20, -0.37_04, 0.19_65, -0.41_16, -0.23_32, -0.33_40, 0.22_47]],
# fmt: on
] )
@require_torch_gpu
def __snake_case ( self : Any , lowerCamelCase : List[str] , lowerCamelCase : List[str] ) -> Tuple:
__snake_case : Any = self.get_sd_vae_model(fpaa=lowerCamelCase )
__snake_case : List[Any] = self.get_sd_image(lowerCamelCase , fpaa=lowerCamelCase )
__snake_case : List[Any] = self.get_generator(lowerCamelCase )
with torch.no_grad():
__snake_case : str = model(lowerCamelCase , generator=lowerCamelCase , sample_posterior=lowerCamelCase ).sample
assert sample.shape == image.shape
__snake_case : Optional[Any] = sample[-1, -2:, :2, -2:].flatten().float().cpu()
__snake_case : Any = torch.tensor(lowerCamelCase )
assert torch_all_close(lowerCamelCase , lowerCamelCase , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.16_09, 0.98_66, -0.04_87, -0.07_77, -0.27_16, 0.83_68, -0.20_55, -0.08_14], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]],
[47, [-0.23_77, 0.11_47, 0.13_33, -0.48_41, -0.25_06, -0.08_05, -0.04_91, -0.40_85], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]],
# fmt: on
] )
def __snake_case ( self : List[Any] , lowerCamelCase : List[Any] , lowerCamelCase : Any , lowerCamelCase : Dict ) -> int:
__snake_case : int = self.get_sd_vae_model()
__snake_case : List[Any] = self.get_sd_image(lowerCamelCase )
with torch.no_grad():
__snake_case : int = model(lowerCamelCase ).sample
assert sample.shape == image.shape
__snake_case : Union[str, Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
__snake_case : List[str] = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(lowerCamelCase , lowerCamelCase , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.20_51, -0.18_03, -0.23_11, -0.21_14, -0.32_92, -0.35_74, -0.29_53, -0.33_23]],
[37, [-0.26_32, -0.26_25, -0.21_99, -0.27_41, -0.45_39, -0.49_90, -0.37_20, -0.49_25]],
# fmt: on
] )
@require_torch_gpu
def __snake_case ( self : List[str] , lowerCamelCase : Tuple , lowerCamelCase : Any ) -> Optional[Any]:
__snake_case : List[str] = self.get_sd_vae_model()
__snake_case : List[Any] = self.get_sd_image(lowerCamelCase , shape=(3, 4, 64, 64) )
with torch.no_grad():
__snake_case : str = model.decode(lowerCamelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
__snake_case : str = sample[-1, -2:, :2, -2:].flatten().cpu()
__snake_case : Optional[int] = torch.tensor(lowerCamelCase )
assert torch_all_close(lowerCamelCase , lowerCamelCase , atol=1E-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.03_69, 0.02_07, -0.07_76, -0.06_82, -0.17_47, -0.19_30, -0.14_65, -0.20_39]],
[16, [-0.16_28, -0.21_34, -0.27_47, -0.26_42, -0.37_74, -0.44_04, -0.36_87, -0.42_77]],
# fmt: on
] )
@require_torch_gpu
def __snake_case ( self : str , lowerCamelCase : Optional[int] , lowerCamelCase : Dict ) -> int:
__snake_case : int = self.get_sd_vae_model(fpaa=lowerCamelCase )
__snake_case : List[str] = self.get_sd_image(lowerCamelCase , shape=(3, 4, 64, 64) , fpaa=lowerCamelCase )
with torch.no_grad():
__snake_case : Union[str, Any] = model.decode(lowerCamelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
__snake_case : Optional[Any] = sample[-1, -2:, :2, -2:].flatten().float().cpu()
__snake_case : Optional[Any] = torch.tensor(lowerCamelCase )
assert torch_all_close(lowerCamelCase , lowerCamelCase , atol=5E-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def __snake_case ( self : Tuple , lowerCamelCase : List[Any] ) -> Tuple:
__snake_case : Dict = self.get_sd_vae_model(fpaa=lowerCamelCase )
__snake_case : Any = self.get_sd_image(lowerCamelCase , shape=(3, 4, 64, 64) , fpaa=lowerCamelCase )
with torch.no_grad():
__snake_case : str = model.decode(lowerCamelCase ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__snake_case : Any = model.decode(lowerCamelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(lowerCamelCase , lowerCamelCase , atol=1E-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def __snake_case ( self : List[Any] , lowerCamelCase : Any ) -> Optional[int]:
__snake_case : str = self.get_sd_vae_model()
__snake_case : Union[str, Any] = self.get_sd_image(lowerCamelCase , shape=(3, 4, 64, 64) )
with torch.no_grad():
__snake_case : List[Any] = model.decode(lowerCamelCase ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__snake_case : Dict = model.decode(lowerCamelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(lowerCamelCase , lowerCamelCase , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.30_01, 0.09_18, -2.69_84, -3.97_20, -3.20_99, -5.03_53, 1.73_38, -0.20_65, 3.42_67]],
[47, [-1.50_30, -4.38_71, -6.03_55, -9.11_57, -1.66_61, -2.78_53, 2.16_07, -5.08_23, 2.56_33]],
# fmt: on
] )
def __snake_case ( self : List[Any] , lowerCamelCase : List[Any] , lowerCamelCase : Dict ) -> Optional[int]:
__snake_case : str = self.get_sd_vae_model()
__snake_case : int = self.get_sd_image(lowerCamelCase )
__snake_case : int = self.get_generator(lowerCamelCase )
with torch.no_grad():
__snake_case : Optional[Any] = model.encode(lowerCamelCase ).latent_dist
__snake_case : Dict = dist.sample(generator=lowerCamelCase )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
__snake_case : List[str] = sample[0, -1, -3:, -3:].flatten().cpu()
__snake_case : Dict = torch.tensor(lowerCamelCase )
__snake_case : Dict = 3E-3 if torch_device != "mps" else 1E-2
assert torch_all_close(lowerCamelCase , lowerCamelCase , atol=lowerCamelCase )
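# Illustrative encode/decode round trip; checkpoint and subfolder are taken
# from the helpers above. The SD VAE downsamples 8x spatially.
if __name__ == "__main__":
    vae = AutoencoderKL.from_pretrained('CompVis/stable-diffusion-v1-4', subfolder='vae')
    image = torch.randn(1, 3, 512, 512)
    with torch.no_grad():
        latents = vae.encode(image).latent_dist.sample()  # (1, 4, 64, 64)
        reconstruction = vae.decode(latents).sample       # (1, 3, 512, 512)
    print(latents.shape, reconstruction.shape)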
| 81 | 0 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
__UpperCAmelCase ={"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 512,
"""facebook/dpr-ctx_encoder-multiset-base""": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-question_encoder-single-nq-base""": 512,
"""facebook/dpr-question_encoder-multiset-base""": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-reader-single-nq-base""": 512,
"""facebook/dpr-reader-multiset-base""": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])

CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions, padding=padding, truncation=truncation, max_length=max_length,
                return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions, text_pair, padding=padding, truncation=truncation, max_length=max_length,
                return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
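# A minimal usage sketch (illustrative, not part of the original file): it assumes the
# "facebook/dpr-reader-single-nq-base" checkpoint and the DPRReader model are available,
# and shows how the reader tokenizer packs (question, title, text) triples into the
# "[CLS] question [SEP] title [SEP] text" layout and how `decode_best_spans` extracts answers.
#
#     from transformers import DPRReader
#
#     tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#     model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
#     encoded_inputs = tokenizer(
#         questions=["What is love?"],
#         titles=["Haddaway"],
#         texts=["'What Is Love' is a song recorded by the artist Haddaway"],
#         return_tensors="pt",
#     )
#     outputs = model(**encoded_inputs)
#     predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
#     print(predicted_spans[0].text)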
| 717 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")

        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }

        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 261 | 0 |
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
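# A quick sanity-check sketch (illustrative, not part of the original file): with the
# "cosine" transform, alpha_bar decays smoothly from ~1 toward 0, so the resulting betas
# start near zero and grow toward max_beta while staying clipped inside [0, max_beta].
#
#     betas = betas_for_alpha_bar(10)
#     assert betas.shape == (10,)
#     assert 0.0 <= float(betas.min()) and float(betas.max()) <= 0.999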
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: float = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ) -> Union[UnCLIPSchedulerOutput, Tuple]:
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )
            variance = self._get_variance(t, predicted_variance=predicted_variance, prev_timestep=prev_timestep)

            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
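# A minimal denoising-loop sketch (illustrative only; the random model output below is a
# stand-in for a real UnCLIP prior/decoder network, and actual pipelines also pass
# `prev_timestep` explicitly when timesteps are spaced):
#
#     scheduler = UnCLIPScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(25)
#     sample = torch.randn(1, 4, 8, 8)
#     for t in scheduler.timesteps:
#         model_output = torch.randn_like(sample)  # replace with model(sample, t)
#         sample = scheduler.step(model_output, t, sample).prev_sample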
| 287 |
'''simple docstring'''
import heapq
def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """Greedy APX-Algorithm for the minimum vertex cover problem.

    >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    >>> greedy_min_vertex_cover(graph)
    {0, 1, 2, 4}
    """
    queue: list[list] = []

    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
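# Illustrative note (not in the original file): `heapq` only provides a min-heap, so
# pushing entries keyed by the *negated* degree makes the highest-degree vertex pop first.
#
#     import heapq
#     q = []
#     heapq.heappush(q, [-3, ("a", [1, 2, 3])])
#     heapq.heappush(q, [-1, ("b", [1])])
#     assert heapq.heappop(q)[1][0] == "a"  # highest-degree vertex comes out first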
| 71 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(self, vocab_size=32000, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=None, hidden_act="silu", max_position_embeddings=2048, initializer_range=0.02, rms_norm_eps=1e-6, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, pretraining_tp=1, tie_word_embeddings=False, rope_scaling=None, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
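# A small usage sketch (hypothetical values, not part of the original file): constructing a
# config runs the rope_scaling validation above, so malformed dictionaries fail fast.
#
#     config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})   # ok
#     LlamaConfig(rope_scaling={"type": "bogus", "factor": 2.0})             # raises ValueError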
| 704 |
'''simple docstring'''
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
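# Non-interactive example (hypothetical graph, not part of the original file): each
# adjacency entry is [neighbor, weight]; the result lists MST edges as (parent, child).
#
#     example = defaultdict(list)
#     for u, v, w in [(0, 1, 1), (0, 2, 4), (1, 2, 2), (1, 3, 6), (2, 3, 3)]:
#         example[u].append([v, w])
#         example[v].append([u, w])
#     print(prisms_algorithm(example))  # expected: [(0, 1), (1, 2), (2, 3)]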
| 119 | 0 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
},
'merges_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/longformer-base-4096": 4096,
    "allenai/longformer-large-4096": 4096,
    "allenai/longformer-large-4096-finetuned-triviaqa": 4096,
    "allenai/longformer-base-4096-extra.pos.embd.only": 4096,
    "allenai/longformer-large-4096-extra.pos.embd.only": 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
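# A quick illustration (not part of the original file): every possible byte gets a
# printable unicode character, so arbitrary UTF-8 text can be mapped losslessly into
# the BPE vocabulary's alphabet and back.
#
#     byte_encoder = bytes_to_unicode()
#     assert len(byte_encoder) == 256
#     assert byte_encoder[ord("A")] == "A"   # printable bytes map to themselves
#     assert byte_encoder[0] == chr(256)     # control bytes get shifted codepoints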
class LongformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
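# A round-trip sketch (requires downloading the checkpoint, so illustrative only):
# byte-level BPE is lossless, so encoding then decoding recovers the input text.
#
#     tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
#     ids = tokenizer("Hello world")["input_ids"]
#     assert tokenizer.decode(ids, skip_special_tokens=True).strip() == "Hello world"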
| 357 |
"""simple docstring"""
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop till the total weight do not reach max limit e.g. 15 kg and till i<length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1

        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
if __name__ == "__main__":
print(
'Input profits, weights, and then max_weight (all positive ints) separated by '
'spaces.'
)
    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))
# Function Call
calc_profit(profit, weight, max_weight)
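# Non-interactive example (hypothetical numbers, not part of the original file): items are
# taken greedily by profit/weight ratio, and the last item may be taken fractionally.
#
#     # profits [10, 9, 8], weights [5, 3, 2], capacity 6:
#     # take item 2 (ratio 4.0), item 1 (ratio 3.0), then 1/5 of item 0 -> 8 + 9 + 2 = 19
#     assert calc_profit([10, 9, 8], [5, 3, 2], 6) == 19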
| 357 | 1 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
# NOTE: the original class name was obfuscated; `ImageTextProcessor` below is a stand-in
# name for this generic image+text processor built on AutoImageProcessor/AutoTokenizer.
class ImageTextProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
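# A usage sketch (checkpoint name is hypothetical): the processor forwards text to the
# tokenizer and images to the image processor, merging both into a single encoding.
#
#     from PIL import Image
#     processor = ImageTextProcessor.from_pretrained("some/checkpoint")
#     inputs = processor(text="a photo of a cat", images=Image.new("RGB", (224, 224)))
#     sorted(inputs.keys())  # ['attention_mask', 'input_ids', 'pixel_values']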
| 709 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert"] = ["AlbertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert_fast"] = ["AlbertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_albert"] = [
'ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'AlbertForMaskedLM',
'AlbertForMultipleChoice',
'AlbertForPreTraining',
'AlbertForQuestionAnswering',
'AlbertForSequenceClassification',
'AlbertForTokenClassification',
'AlbertModel',
'AlbertPreTrainedModel',
'load_tf_weights_in_albert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_albert"] = [
'TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAlbertForMaskedLM',
'TFAlbertForMultipleChoice',
'TFAlbertForPreTraining',
'TFAlbertForQuestionAnswering',
'TFAlbertForSequenceClassification',
'TFAlbertForTokenClassification',
'TFAlbertMainLayer',
'TFAlbertModel',
'TFAlbertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_albert"] = [
'FlaxAlbertForMaskedLM',
'FlaxAlbertForMultipleChoice',
'FlaxAlbertForPreTraining',
'FlaxAlbertForQuestionAnswering',
'FlaxAlbertForSequenceClassification',
'FlaxAlbertForTokenClassification',
'FlaxAlbertModel',
'FlaxAlbertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 132 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")
def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(distrib_moves, distrib_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
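# Worked example (not in the original file): the root holds 3 coins and each leaf holds 0,
# so exactly one coin must move to each child -> 2 moves in total.
#
#     root = TreeNode(3, TreeNode(0), TreeNode(0))
#     assert distribute_coins(root) == 2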
| 664 |
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None

log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True
def _get_default_logging_level():
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level
def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())
def _configure_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False
def _reset_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if not _default_handler:
            return

        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None
def get_log_levels_dict():
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    if name is None:
        name = _get_library_name()

    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()
def set_verbosity(verbosity: int) -> None:
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)
def disable_default_handler() -> None:
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()

    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()

    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)
def disable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        handler.setFormatter(None)


def warning_advice(self, *args, **kwargs):
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    """Return a boolean indicating whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    """Enable tqdm progress bar."""
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bar():
    """Disable tqdm progress bar."""
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
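# A usage sketch (illustrative, not part of the original file): `warning_once` is cached
# with lru_cache above, so repeated identical calls emit the warning only once.
#
#     logger = get_logger("transformers")
#     set_verbosity(INFO)
#     logger.info("visible at INFO level")
#     logger.warning_once("only emitted once per unique message")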
| 664 | 1 |
'''simple docstring'''
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
_DESCRIPTION = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
_KWARGS_DESCRIPTION = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
"""simple docstring"""
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/krishnap25/mauve",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/krishnap25/mauve"],
            reference_urls=[
                "https://arxiv.org/abs/2102.01454",
                "https://github.com/krishnap25/mauve",
            ],
        )
    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
| 603 |
'''simple docstring'''
import argparse
JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version):
    """Point custom.js at the new stable version and add it to the version table."""
    with open(JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
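# Hedged demo (not in the original script): run update_custom_js against a
# throwaway copy of a minimal custom.js so the rewrite is easy to inspect.
# The sample version strings below are illustrative only.
def _demo_update_custom_js():
    global JS_FILE
    import os
    import tempfile

    sample = (
        'const stableVersion = "v4.30.0"\n'
        "const versionMapping = {\n"
        '    "v4.30.0": "v4.30.0",\n'
        "}\n"
    )
    old_js_file = JS_FILE
    with tempfile.NamedTemporaryFile("w", suffix=".js", delete=False) as tmp:
        tmp.write(sample)
    JS_FILE = tmp.name  # point the updater at the temp file for the demo
    try:
        update_custom_js("4.31.0")
        with open(tmp.name) as f:
            print(f.read())  # stableVersion bumped and "v4.31.0" added to the map
    finally:
        JS_FILE = old_js_file
        os.unlink(tmp.name)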
| 603 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
    "configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_maskformer"] = [
        "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MaskFormerForInstanceSegmentation",
        "MaskFormerModel",
        "MaskFormerPreTrainedModel",
    ]
    _import_structure["modeling_maskformer_swin"] = [
        "MaskFormerSwinBackbone",
        "MaskFormerSwinModel",
        "MaskFormerSwinPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
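# Hedged illustration (not part of this __init__): because _LazyModule replaces
# the package in sys.modules, the submodules listed in _import_structure are
# imported only on first attribute access. Assuming transformers is installed:
if __name__ == "__main__":
    from transformers.models.maskformer import MaskFormerConfig  # real import happens here

    print(MaskFormerConfig().model_type)  # -> "maskformer"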
| 456 |
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
    def test_is_control(self):
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
            )
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        text = tokenizer.encode("你好", add_special_tokens=False)
        text_2 = tokenizer.encode("你是谁", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]
    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True
                )

                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)

                self.assertEqual(input_dict, prepared_input_dict)
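# Hedged illustration (not part of the test file): RoCBert pairs every token
# with three parallel id sequences -- vocabulary, glyph-shape and pronunciation
# ids -- which is exactly what prepare_for_model consumes above. Assuming the
# public weiweishi/roc-bert-base-zh checkpoint is reachable:
if __name__ == "__main__":
    from transformers import RoCBertTokenizer

    tok = RoCBertTokenizer.from_pretrained("weiweishi/roc-bert-base-zh")
    toks = tok.tokenize("你好")
    print(tok.convert_tokens_to_ids(toks))
    print(tok.convert_tokens_to_shape_ids(toks))
    print(tok.convert_tokens_to_pronunciation_ids(toks))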
| 456 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/config.json",
    # See all XGLM models at https://huggingface.co/models?filter=xglm
}
class XGLMConfig(PretrainedConfig):
    model_type = "xglm"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self,
        vocab_size=256008,
        max_position_embeddings=2048,
        d_model=1024,
        ffn_dim=4096,
        num_layers=24,
        attention_heads=16,
        activation_function="gelu",
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        layerdrop=0.0,
        init_std=0.02,
        scale_embedding=True,
        use_cache=True,
        decoder_start_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
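# Hedged usage sketch (not part of the config file): attribute_map lets the
# generic Transformers attribute names resolve to the XGLM-specific fields.
if __name__ == "__main__":
    config = XGLMConfig()
    print(config.hidden_size)          # -> 1024, aliased to d_model
    print(config.num_attention_heads)  # -> 16, aliased to attention_heads
    print(config.num_hidden_layers)    # -> 24, aliased to num_layers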
| 522 |
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
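# Hedged companion snippet (not in the original script): reading the binarized
# dump back. The default path mirrors the --dump_file/--tokenizer_name defaults
# above and is illustrative only.
def load_binarized(path="data/dump.bert-base-uncased.pickle"):
    with open(path, "rb") as handle:
        # a shuffled list of np.uint16 / np.int32 arrays of token ids
        return pickle.load(handle)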
| 522 | 1 |
from collections.abc import Sequence
def max_subsequence_sum(nums=None):
    """Return the maximum sum of a non-empty contiguous subsequence (Kadane's algorithm)."""
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = curr = nums[0]
    for num in nums[1:]:
        # the best subarray ending here either extends the previous one or restarts at num
        curr = max(num, curr + num)
        ans = max(ans, curr)
    return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
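# Hedged sanity checks (not in the original file), runnable without the
# interactive prompt above.
def _self_test():
    assert max_subsequence_sum([1, 2, 3, 4, -2]) == 10
    assert max_subsequence_sum([-2, -3, -1, -4, -6]) == -1  # all negative: best single element
    assert max_subsequence_sum([2, -10, 5]) == 5  # restarting beats extending the earlier maximum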
| 21 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ["LayoutLMv2TokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ["LayoutLMv2FeatureExtractor"]
_lowercase = ["LayoutLMv2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
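# Hedged illustration (not part of this __init__): the try/except blocks above
# make the export list degrade gracefully when an optional backend is missing.
# The same availability flags can be queried directly:
if __name__ == "__main__":
    print("tokenizers available:", is_tokenizers_available())
    print("torch available:     ", is_torch_available())
    print("vision available:    ", is_vision_available())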
| 632 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list):
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    test_cpu_offload = True
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu", attention_head_dim=8, norm_num_groups=None, block_out_channels=[32, 32, 64, 64], time_cond_proj_dim=160, conv_in_kernel=1, conv_out_kernel=1, cross_attention_dim=32, down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ), in_channels=8, mid_block_type=None, only_cross_attention=False, out_channels=5, resnet_time_scale_shift="scale_shift", time_embedding_type="fourier", timestep_post_act="gelu", up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"), )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64], in_channels=3, out_channels=3, down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="quick_gelu", projection_dim=512, )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_latent_upscaler(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)
    def test_karras_schedulers_shape(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2
        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                # no schedulers
                continue

            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

        assert check_same_shape(outputs)
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"

        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images

        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2
    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"

        low_res_img = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )

        image = upscaler(
            prompt=prompt,
            image=low_res_img,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-2
| 169 |
def capitalize_each_position(txt: str) -> list:
    """Return all variants of `txt` with exactly one alphabetic character upper-cased.

    >>> capitalize_each_position("ab")
    ['Ab', 'aB']
    """
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
__import__("doctest").testmod()
| 169 | 1 |
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        self.input_array = input_array

        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.

        # Random initial weights are assigned.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )

        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)

        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)
    def feedforward(self) -> numpy.ndarray:
        # layer_between_input_and_first_hidden_layer is the layer connecting the
        # input nodes with the first hidden layer nodes.
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )

        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return self.layer_between_second_hidden_layer_and_output
    def back_propagation(self) -> None:
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer
            ),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer
                ),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )
    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")
    def predict(self, input_arr: numpy.ndarray) -> int:
        # Input values for which the prediction is to be made.
        self.array = input_arr

        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )

        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    return (value) * (1 - (value))
def example() -> int:
    # Input values.
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )

    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)

    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
example()
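# Hedged sanity check (not part of the original file): with random initial
# weights the outcome is not guaranteed, so this is illustrative rather than
# a strict test.
def _sanity_check():
    inputs = numpy.array(([0, 0, 0], [1, 1, 1]), dtype=numpy.float64)
    targets = numpy.array(([0], [1]), dtype=numpy.float64)
    nn = TwoHiddenLayerNeuralNetwork(input_array=inputs, output_array=targets)
    nn.train(output=targets, iterations=1000, give_loss=False)
    print([nn.predict(row) for row in inputs])  # ideally [0, 1]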
| 413 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue_model_parallelism.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
    ]
)
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")
    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 413 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list):
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    test_cpu_offload = True
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu", attention_head_dim=8, norm_num_groups=None, block_out_channels=[32, 32, 64, 64], time_cond_proj_dim=160, conv_in_kernel=1, conv_out_kernel=1, cross_attention_dim=32, down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ), in_channels=8, mid_block_type=None, only_cross_attention=False, out_channels=5, resnet_time_scale_shift="scale_shift", time_embedding_type="fourier", timestep_post_act="gelu", up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"), )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64], in_channels=3, out_channels=3, down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="quick_gelu", projection_dim=512, )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_latent_upscaler(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)
    def test_karras_schedulers_shape(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2
        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                # no schedulers
                continue

            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

        assert check_same_shape(outputs)
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"

        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images

        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2
    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"

        low_res_img = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )

        image = upscaler(
            prompt=prompt,
            image=low_res_img,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-2
| 706 |
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
MODEL_CLASSES = {
'bart': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'bert': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-base-cased-finetuned-mrpc': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'dpr': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'gpt2': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlnet': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm-roberta': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'transfo-xl': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'openai-gpt': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'roberta': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'layoutlm': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'roberta-large-mnli': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'camembert': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'flaubert': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert-base-distilled-squad': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert-visual-feature-encoder': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'ctrl': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'albert': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
't5': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'electra': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'wav2vec2': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def a ( A__ : Tuple , A__ : List[Any] , A__ : Optional[int] , A__ : Dict , A__ : Any=False , A__ : str=True ) -> str:
"""simple docstring"""
if model_type not in MODEL_CLASSES:
raise ValueError(F'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' )
_lowercase , _lowercase , _lowercase , _lowercase =MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
_lowercase =cached_file(A__ , A__ , force_download=not use_cached_models )
_lowercase =config_class.from_json_file(A__ )
_lowercase =True
_lowercase =True
print(F'''Building TensorFlow model from configuration: {config}''' )
_lowercase =model_class(A__ )
# Load weights from tf checkpoint
if pytorch_checkpoint_path in aws_config_map.keys():
_lowercase =cached_file(
A__ , A__ , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
_lowercase =load_pytorch_checkpoint_in_tfa_model(A__ , A__ )
if compare_with_pt_model:
_lowercase =tf_model(tf_model.dummy_inputs , training=A__ ) # build the network
_lowercase =torch.load(A__ , map_location='cpu' )
_lowercase =pt_model_class.from_pretrained(
pretrained_model_name_or_path=A__ , config=A__ , state_dict=A__ )
with torch.no_grad():
_lowercase =pt_model(**pt_model.dummy_inputs )
_lowercase =pto[0].numpy()
_lowercase =tfo[0].numpy()
_lowercase =np.amax(np.abs(np_pt - np_tf ) )
print(F'''Max absolute difference between models outputs {diff}''' )
assert diff <= 2e-2, F'''Error, model absolute difference is >2e-2: {diff}'''
# Save pytorch-model
print(F'''Save TensorFlow model to {tf_dump_path}''' )
tf_model.save_weights(A__ , save_format='h5' )
def a ( A__ : str , A__ : str , A__ : Optional[Any]=None , A__ : Any=None , A__ : Optional[int]=False , A__ : Optional[int]=False , A__ : int=False , A__ : str=False , ) -> List[Any]:
"""simple docstring"""
if args_model_type is None:
_lowercase =list(MODEL_CLASSES.keys() )
else:
_lowercase =[args_model_type]
for j, model_type in enumerate(A__ , start=1 ):
print('=' * 100 )
print(F''' Converting model type {j}/{len(A__ )}: {model_type}''' )
print('=' * 100 )
if model_type not in MODEL_CLASSES:
raise ValueError(F'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' )
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase =MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
_lowercase =list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
_lowercase =model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(A__ , A__ ) , start=1 ):
print('-' * 100 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(F''' Skipping finetuned checkpoint {model_shortcut_name}''' )
continue
_lowercase =model_shortcut_name
elif only_convert_finetuned_models:
            print(F''' Skipping non-finetuned checkpoint {model_shortcut_name}''' )
continue
print(
F''' Converting checkpoint {i}/{len(A__ )}: {model_shortcut_name} - model_type {model_type}''' )
print('-' * 100 )
if config_shortcut_name in aws_config_map:
_lowercase =cached_file(A__ , A__ , force_download=not use_cached_models )
else:
_lowercase =config_shortcut_name
if model_shortcut_name in aws_model_maps:
_lowercase =cached_file(A__ , A__ , force_download=not use_cached_models )
else:
_lowercase =model_shortcut_name
if os.path.isfile(A__ ):
_lowercase ='converted_model'
convert_pt_checkpoint_to_tf(
model_type=A__ , pytorch_checkpoint_path=A__ , config_file=A__ , tf_dump_path=os.path.join(A__ , model_shortcut_name + '-tf_model.h5' ) , compare_with_pt_model=A__ , )
if remove_cached_files:
os.remove(A__ )
os.remove(A__ )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_dump_path', default=None, type=str, required=True, help='Path to the output Tensorflow dump file.'
)
parser.add_argument(
'--model_type',
default=None,
type=str,
help=(
f"Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and "
'convert all the models from AWS.'
),
)
parser.add_argument(
'--pytorch_checkpoint_path',
default=None,
type=str,
help=(
            'Path to the PyTorch checkpoint or shortcut name to download from AWS. '
'If not given, will download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--config_file',
default=None,
type=str,
help=(
'The config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture. If not given and '
            '--pytorch_checkpoint_path is not given or is a shortcut name, '
            'use the configuration associated with the shortcut name on AWS.'
),
)
parser.add_argument(
'--compare_with_pt_model', action='store_true', help='Compare Tensorflow and PyTorch model predictions.'
)
parser.add_argument(
'--use_cached_models',
action='store_true',
help='Use cached models if possible instead of updating to latest checkpoint versions.',
)
parser.add_argument(
'--remove_cached_files',
action='store_true',
help='Remove pytorch models after conversion (save memory when converting in batches).',
)
parser.add_argument('--only_convert_finetuned_models', action='store_true', help='Only convert finetuned models.')
lowercase_ = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
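# Example invocation (a sketch: the script filename and local paths are assumptions,
# while the flags come from the argparse definitions above):
#
#   python convert_pytorch_checkpoint_to_tf2.py \
#       --tf_dump_path ./tf_dump \
#       --model_type bert \
#       --pytorch_checkpoint_path bert-base-cased \
#       --compare_with_pt_model
#
# Omitting --model_type converts every architecture registered in MODEL_CLASSES.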
| 380 | 0 |
"""simple docstring"""
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
lowerCAmelCase__ = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
lowerCAmelCase__ = {
'''facebook/blenderbot_small-90M''': 512,
}
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : List[Any] =VOCAB_FILES_NAMES
a : Optional[Any] =PRETRAINED_VOCAB_FILES_MAP
a : Optional[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : Optional[Any] =BlenderbotSmallTokenizer
def __init__( self , snake_case__=None , snake_case__=None , snake_case__="<|endoftext|>" , snake_case__="<|endoftext|>" , snake_case__="<|endoftext|>" , snake_case__=False , snake_case__=True , **snake_case__ , ):
"""simple docstring"""
super().__init__(
ByteLevelBPETokenizer(
vocab=snake_case__ , merges=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , ) , bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , **snake_case__ , )
lowerCAmelCase : Optional[Any] = add_prefix_space
def lowercase__ ( self , snake_case__ , snake_case__=None ):
"""simple docstring"""
lowerCAmelCase : int = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowercase__ ( self , snake_case__ , snake_case__ = None ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = [self.sep_token_id]
lowerCAmelCase : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
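# Minimal usage sketch for the fast tokenizer above (the class is named
# SCREAMING_SNAKE_CASE__ in this file; downloading "facebook/blenderbot_small-90M"
# needs network access, so treat this as illustrative rather than guaranteed runnable):
#
#   tokenizer = SCREAMING_SNAKE_CASE__.from_pretrained("facebook/blenderbot_small-90M")
#   encoded = tokenizer("hello world")
#   print(encoded["input_ids"])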
| 645 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : int ="SpeechT5FeatureExtractor"
a : Any ="SpeechT5Tokenizer"
def __init__( self , snake_case__ , snake_case__ ):
"""simple docstring"""
super().__init__(snake_case__ , snake_case__ )
def __call__( self , *snake_case__ , **snake_case__ ):
"""simple docstring"""
lowerCAmelCase : str = kwargs.pop("audio" , snake_case__ )
lowerCAmelCase : Tuple = kwargs.pop("text" , snake_case__ )
lowerCAmelCase : str = kwargs.pop("text_target" , snake_case__ )
lowerCAmelCase : List[str] = kwargs.pop("audio_target" , snake_case__ )
lowerCAmelCase : Union[str, Any] = kwargs.pop("sampling_rate" , snake_case__ )
if audio is not None and text is not None:
raise ValueError(
"Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?" )
if audio_target is not None and text_target is not None:
raise ValueError(
"Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?" )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
"You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process." )
if audio is not None:
lowerCAmelCase : int = self.feature_extractor(snake_case__ , *snake_case__ , sampling_rate=snake_case__ , **snake_case__ )
elif text is not None:
lowerCAmelCase : Optional[int] = self.tokenizer(snake_case__ , **snake_case__ )
else:
lowerCAmelCase : Union[str, Any] = None
if audio_target is not None:
lowerCAmelCase : Optional[Any] = self.feature_extractor(audio_target=snake_case__ , *snake_case__ , sampling_rate=snake_case__ , **snake_case__ )
lowerCAmelCase : Any = targets["input_values"]
elif text_target is not None:
lowerCAmelCase : Tuple = self.tokenizer(snake_case__ , **snake_case__ )
lowerCAmelCase : str = targets["input_ids"]
else:
lowerCAmelCase : str = None
if inputs is None:
return targets
if targets is not None:
lowerCAmelCase : List[str] = labels
lowerCAmelCase : List[Any] = targets.get("attention_mask" )
if decoder_attention_mask is not None:
lowerCAmelCase : Union[str, Any] = decoder_attention_mask
return inputs
def lowercase__ ( self , *snake_case__ , **snake_case__ ):
"""simple docstring"""
lowerCAmelCase : int = kwargs.pop("input_values" , snake_case__ )
lowerCAmelCase : List[Any] = kwargs.pop("input_ids" , snake_case__ )
lowerCAmelCase : Dict = kwargs.pop("labels" , snake_case__ )
if input_values is not None and input_ids is not None:
raise ValueError("Cannot process both `input_values` and `input_ids` inputs." )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
"You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded." )
if input_values is not None:
lowerCAmelCase : int = self.feature_extractor.pad(snake_case__ , *snake_case__ , **snake_case__ )
elif input_ids is not None:
lowerCAmelCase : Optional[Any] = self.tokenizer.pad(snake_case__ , **snake_case__ )
else:
lowerCAmelCase : Optional[Any] = None
if labels is not None:
if "input_ids" in labels or (isinstance(snake_case__ , snake_case__ ) and "input_ids" in labels[0]):
lowerCAmelCase : Tuple = self.tokenizer.pad(snake_case__ , **snake_case__ )
lowerCAmelCase : Any = targets["input_ids"]
else:
lowerCAmelCase : List[Any] = self.feature_extractor.feature_size
lowerCAmelCase : Optional[int] = self.feature_extractor.num_mel_bins
lowerCAmelCase : str = self.feature_extractor.pad(snake_case__ , *snake_case__ , **snake_case__ )
lowerCAmelCase : Optional[Any] = feature_size_hack
lowerCAmelCase : Optional[Any] = targets["input_values"]
else:
lowerCAmelCase : List[Any] = None
if inputs is None:
return targets
if targets is not None:
lowerCAmelCase : int = labels
lowerCAmelCase : Optional[int] = targets.get("attention_mask" )
if decoder_attention_mask is not None:
lowerCAmelCase : List[Any] = decoder_attention_mask
return inputs
def lowercase__ ( self , *snake_case__ , **snake_case__ ):
"""simple docstring"""
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def lowercase__ ( self , *snake_case__ , **snake_case__ ):
"""simple docstring"""
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
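# Usage sketch for the processor above: `audio`/`text` inputs are routed to the feature
# extractor/tokenizer, while `audio_target`/`text_target` populate the label side. The
# checkpoint name below is an assumption used only for illustration:
#
#   from transformers import SpeechT5Processor
#   processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
#   inputs = processor(text="hello world", return_tensors="pt")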
| 645 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
"""tanreinama/GPTSAN-2.8B-spout_is_uniform""": (
"""https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"""
),
}
class a__ ( _lowercase ):
__magic_name__ : Union[str, Any] = "gptsan-japanese"
__magic_name__ : int = [
"past_key_values",
]
__magic_name__ : str = {
"hidden_size": "d_model",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__(self : List[str], __UpperCAmelCase : str=36000, __UpperCAmelCase : Union[str, Any]=1280, __UpperCAmelCase : List[str]=1024, __UpperCAmelCase : Optional[int]=8192, __UpperCAmelCase : Dict=4096, __UpperCAmelCase : Optional[Any]=128, __UpperCAmelCase : Dict=10, __UpperCAmelCase : Any=0, __UpperCAmelCase : List[Any]=16, __UpperCAmelCase : Optional[int]=16, __UpperCAmelCase : Dict=128, __UpperCAmelCase : int=0.0, __UpperCAmelCase : List[Any]=1e-5, __UpperCAmelCase : List[str]=False, __UpperCAmelCase : Optional[int]=0.0, __UpperCAmelCase : List[str]="float32", __UpperCAmelCase : Dict=False, __UpperCAmelCase : List[Any]=False, __UpperCAmelCase : Union[str, Any]=False, __UpperCAmelCase : Optional[int]=0.002, __UpperCAmelCase : int=False, __UpperCAmelCase : Dict=True, __UpperCAmelCase : str=35998, __UpperCAmelCase : Any=35995, __UpperCAmelCase : int=35999, **__UpperCAmelCase : str, ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = vocab_size
SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
SCREAMING_SNAKE_CASE : Optional[int] = d_model
SCREAMING_SNAKE_CASE : int = d_ff
SCREAMING_SNAKE_CASE : int = d_ext
SCREAMING_SNAKE_CASE : Optional[int] = d_spout
SCREAMING_SNAKE_CASE : Dict = num_switch_layers
SCREAMING_SNAKE_CASE : str = num_ext_layers
SCREAMING_SNAKE_CASE : int = num_switch_layers + num_ext_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = num_heads
SCREAMING_SNAKE_CASE : List[str] = num_experts
SCREAMING_SNAKE_CASE : Tuple = expert_capacity
SCREAMING_SNAKE_CASE : List[Any] = dropout_rate
SCREAMING_SNAKE_CASE : List[Any] = layer_norm_epsilon
SCREAMING_SNAKE_CASE : Optional[Any] = router_bias
SCREAMING_SNAKE_CASE : Dict = router_jitter_noise
SCREAMING_SNAKE_CASE : str = router_dtype
SCREAMING_SNAKE_CASE : str = router_ignore_padding_tokens
SCREAMING_SNAKE_CASE : List[Any] = output_hidden_states
SCREAMING_SNAKE_CASE : Tuple = output_attentions
SCREAMING_SNAKE_CASE : Optional[Any] = initializer_factor
SCREAMING_SNAKE_CASE : str = output_router_logits
SCREAMING_SNAKE_CASE : Optional[int] = use_cache
super().__init__(
separator_token_id=__UpperCAmelCase, pad_token_id=__UpperCAmelCase, eos_token_id=__UpperCAmelCase, **__UpperCAmelCase, )
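# Instantiation sketch for the config above (class name kept as written in this file;
# keyword names follow the attributes the constructor assigns): with no arguments it
# reproduces the defaults from __init__ (vocab_size=36000, d_model=1024,
# num_switch_layers=10, ...), and num_layers is derived as
# num_switch_layers + num_ext_layers:
#
#   config = a__(d_model=512, num_heads=8)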
| 355 |
'''simple docstring'''
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def __lowercase (_SCREAMING_SNAKE_CASE :List[Any] ):
return x + 2
class a__ ( unittest.TestCase ):
def lowercase__ (self : List[str] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = '''x = 3'''
SCREAMING_SNAKE_CASE : Dict = {}
SCREAMING_SNAKE_CASE : Any = evaluate(__UpperCAmelCase, {}, state=__UpperCAmelCase )
assert result == 3
self.assertDictEqual(__UpperCAmelCase, {'''x''': 3} )
SCREAMING_SNAKE_CASE : str = '''x = y'''
SCREAMING_SNAKE_CASE : int = {'''y''': 5}
SCREAMING_SNAKE_CASE : Optional[int] = evaluate(__UpperCAmelCase, {}, state=__UpperCAmelCase )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__UpperCAmelCase, {'''x''': 5, '''y''': 5} )
def lowercase__ (self : Optional[int] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = '''y = add_two(x)'''
SCREAMING_SNAKE_CASE : Optional[Any] = {'''x''': 3}
SCREAMING_SNAKE_CASE : str = evaluate(__UpperCAmelCase, {'''add_two''': add_two}, state=__UpperCAmelCase )
assert result == 5
self.assertDictEqual(__UpperCAmelCase, {'''x''': 3, '''y''': 5} )
# Won't work without the tool
with CaptureStdout() as out:
SCREAMING_SNAKE_CASE : Tuple = evaluate(__UpperCAmelCase, {}, state=__UpperCAmelCase )
assert result is None
assert "tried to execute add_two" in out.out
def lowercase__ (self : Any ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = '''x = 3'''
SCREAMING_SNAKE_CASE : Any = {}
SCREAMING_SNAKE_CASE : str = evaluate(__UpperCAmelCase, {}, state=__UpperCAmelCase )
assert result == 3
self.assertDictEqual(__UpperCAmelCase, {'''x''': 3} )
def lowercase__ (self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = '''test_dict = {\'x\': x, \'y\': add_two(x)}'''
SCREAMING_SNAKE_CASE : Tuple = {'''x''': 3}
SCREAMING_SNAKE_CASE : List[Any] = evaluate(__UpperCAmelCase, {'''add_two''': add_two}, state=__UpperCAmelCase )
self.assertDictEqual(__UpperCAmelCase, {'''x''': 3, '''y''': 5} )
self.assertDictEqual(__UpperCAmelCase, {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}} )
def lowercase__ (self : Optional[Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = '''x = 3\ny = 5'''
SCREAMING_SNAKE_CASE : Tuple = {}
SCREAMING_SNAKE_CASE : Optional[Any] = evaluate(__UpperCAmelCase, {}, state=__UpperCAmelCase )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__UpperCAmelCase, {'''x''': 3, '''y''': 5} )
def lowercase__ (self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = '''text = f\'This is x: {x}.\''''
SCREAMING_SNAKE_CASE : List[str] = {'''x''': 3}
SCREAMING_SNAKE_CASE : Dict = evaluate(__UpperCAmelCase, {}, state=__UpperCAmelCase )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(__UpperCAmelCase, {'''x''': 3, '''text''': '''This is x: 3.'''} )
def lowercase__ (self : int ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = '''if x <= 3:\n y = 2\nelse:\n y = 5'''
SCREAMING_SNAKE_CASE : int = {'''x''': 3}
SCREAMING_SNAKE_CASE : List[str] = evaluate(__UpperCAmelCase, {}, state=__UpperCAmelCase )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(__UpperCAmelCase, {'''x''': 3, '''y''': 2} )
SCREAMING_SNAKE_CASE : Any = {'''x''': 8}
SCREAMING_SNAKE_CASE : int = evaluate(__UpperCAmelCase, {}, state=__UpperCAmelCase )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__UpperCAmelCase, {'''x''': 8, '''y''': 5} )
def lowercase__ (self : Any ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = '''test_list = [x, add_two(x)]'''
SCREAMING_SNAKE_CASE : List[str] = {'''x''': 3}
SCREAMING_SNAKE_CASE : Tuple = evaluate(__UpperCAmelCase, {'''add_two''': add_two}, state=__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase, [3, 5] )
self.assertDictEqual(__UpperCAmelCase, {'''x''': 3, '''test_list''': [3, 5]} )
def lowercase__ (self : int ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = '''y = x'''
SCREAMING_SNAKE_CASE : Tuple = {'''x''': 3}
SCREAMING_SNAKE_CASE : str = evaluate(__UpperCAmelCase, {}, state=__UpperCAmelCase )
assert result == 3
self.assertDictEqual(__UpperCAmelCase, {'''x''': 3, '''y''': 3} )
def lowercase__ (self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = '''test_list = [x, add_two(x)]\ntest_list[1]'''
SCREAMING_SNAKE_CASE : int = {'''x''': 3}
SCREAMING_SNAKE_CASE : Optional[int] = evaluate(__UpperCAmelCase, {'''add_two''': add_two}, state=__UpperCAmelCase )
assert result == 5
self.assertDictEqual(__UpperCAmelCase, {'''x''': 3, '''test_list''': [3, 5]} )
SCREAMING_SNAKE_CASE : Dict = '''test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']'''
SCREAMING_SNAKE_CASE : Tuple = {'''x''': 3}
SCREAMING_SNAKE_CASE : Optional[Any] = evaluate(__UpperCAmelCase, {'''add_two''': add_two}, state=__UpperCAmelCase )
assert result == 5
self.assertDictEqual(__UpperCAmelCase, {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}} )
def lowercase__ (self : Optional[Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = '''x = 0\nfor i in range(3):\n x = i'''
SCREAMING_SNAKE_CASE : List[Any] = {}
SCREAMING_SNAKE_CASE : List[str] = evaluate(__UpperCAmelCase, {'''range''': range}, state=__UpperCAmelCase )
assert result == 2
self.assertDictEqual(__UpperCAmelCase, {'''x''': 2, '''i''': 2} )
| 355 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCAmelCase = logging.get_logger(__name__)
class __snake_case( _lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = ["pixel_values"]
def __init__( self , A_ = True , A_ = None , A_ = PILImageResampling.BILINEAR , A_ = True , A_ = None , A_ = True , A_ = 1 / 255 , A_ = True , A_ = None , A_ = None , **A_ , ) -> None:
super().__init__(**A_ )
lowerCAmelCase = size if size is not None else {"""shortest_edge""": 256}
lowerCAmelCase = get_size_dict(A_ , default_to_square=A_ )
lowerCAmelCase = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
lowerCAmelCase = get_size_dict(A_ , param_name="""crop_size""" )
lowerCAmelCase = do_resize
lowerCAmelCase = size
lowerCAmelCase = resample
lowerCAmelCase = do_center_crop
lowerCAmelCase = crop_size
lowerCAmelCase = do_rescale
lowerCAmelCase = rescale_factor
lowerCAmelCase = do_normalize
lowerCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __snake_case ( self , A_ , A_ , A_ = PILImageResampling.BICUBIC , A_ = None , **A_ , ) -> np.ndarray:
lowerCAmelCase = get_size_dict(A_ , default_to_square=A_ )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
lowerCAmelCase = get_resize_output_image_size(A_ , size=size["""shortest_edge"""] , default_to_square=A_ )
return resize(A_ , size=A_ , resample=A_ , data_format=A_ , **A_ )
def __snake_case ( self , A_ , A_ , A_ = None , **A_ , ) -> np.ndarray:
lowerCAmelCase = get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(A_ , size=(size["""height"""], size["""width"""]) , data_format=A_ , **A_ )
def __snake_case ( self , A_ , A_ , A_ = None , **A_ ) -> np.ndarray:
return rescale(A_ , scale=A_ , data_format=A_ , **A_ )
def __snake_case ( self , A_ , A_ , A_ , A_ = None , **A_ , ) -> np.ndarray:
return normalize(A_ , mean=A_ , std=A_ , data_format=A_ , **A_ )
def __snake_case ( self , A_ , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = ChannelDimension.FIRST , **A_ , ) -> List[str]:
lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase = size if size is not None else self.size
lowerCAmelCase = get_size_dict(A_ , default_to_square=A_ )
lowerCAmelCase = resample if resample is not None else self.resample
lowerCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase = get_size_dict(A_ , param_name="""crop_size""" )
lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase = image_std if image_std is not None else self.image_std
lowerCAmelCase = make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
lowerCAmelCase = [to_numpy_array(A_ ) for image in images]
if do_resize:
lowerCAmelCase = [self.resize(image=A_ , size=A_ , resample=A_ ) for image in images]
if do_center_crop:
lowerCAmelCase = [self.center_crop(image=A_ , size=A_ ) for image in images]
if do_rescale:
lowerCAmelCase = [self.rescale(image=A_ , scale=A_ ) for image in images]
if do_normalize:
lowerCAmelCase = [self.normalize(image=A_ , mean=A_ , std=A_ ) for image in images]
lowerCAmelCase = [to_channel_dimension_format(A_ , A_ ) for image in images]
lowerCAmelCase = {"""pixel_values""": images}
return BatchFeature(data=A_ , tensor_type=A_ )
def __snake_case ( self , A_ , A_ = None ) -> Union[str, Any]:
lowerCAmelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(A_ ) != len(A_ ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(A_ ):
lowerCAmelCase = target_sizes.numpy()
lowerCAmelCase = []
for idx in range(len(A_ ) ):
lowerCAmelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=A_ )
lowerCAmelCase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(A_ )
else:
lowerCAmelCase = logits.argmax(dim=1 )
lowerCAmelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
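# Preprocessing sketch for the image processor above: by default it resizes to
# shortest_edge=256, center-crops to 224x224, rescales by 1/255, and normalizes with
# IMAGENET_STANDARD_MEAN/STD. The class name is kept as written here and the input
# file is an assumption:
#
#   from PIL import Image
#   processor = __snake_case()
#   batch = processor(images=Image.open("cat.png"), return_tensors="pt")
#   print(batch["pixel_values"].shape)  # (1, 3, 224, 224)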
| 433 |
'''simple docstring'''
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'--original_config_file',
default=None,
type=str,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
        help='The number of input channels. If `None`, the number of input channels will be inferred automatically.',
)
parser.add_argument(
'--scheduler_type',
default='pndm',
type=str,
help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
)
parser.add_argument(
'--pipeline_type',
default=None,
type=str,
help=(
'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
            '. If `None`, the pipeline type will be inferred automatically.'
),
)
parser.add_argument(
'--image_size',
default=None,
type=int,
help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--prediction_type',
default=None,
type=str,
help=(
'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
parser.add_argument(
'--stable_unclip',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
)
parser.add_argument(
'--stable_unclip_prior',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
)
parser.add_argument(
'--clip_stats_path',
type=str,
help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
required=False,
)
parser.add_argument(
'--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
)
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--vae_path',
type=str,
default=None,
required=False,
help='Set to a path, hub id to an already converted vae to not convert it again.',
)
UpperCAmelCase = parser.parse_args()
UpperCAmelCase = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
pipe.to(torch_dtype=torch.floataa)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
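# Example invocation (a sketch: the checkpoint filename, config file, and output
# directory are assumptions, while the flags come from the argparse definitions above):
#
#   python convert_original_stable_diffusion_to_diffusers.py \
#       --checkpoint_path ./v1-5.ckpt \
#       --original_config_file ./v1-inference.yaml \
#       --scheduler_type ddim \
#       --dump_path ./sd-diffusers \
#       --half --to_safetensors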
| 433 | 1 |
"""simple docstring"""
def UpperCamelCase_ ( lowerCamelCase : int , lowerCamelCase : Tuple ) -> Tuple:
"""simple docstring"""
    __magic_name__ : Dict = 1  # To keep the calculated value
# Since C(n, k) = C(n, n-k)
if k > (n - k):
__magic_name__ : Dict = n - k
# Calculate C(n,k)
for i in range(lowerCamelCase_ ):
result *= n - i
result //= i + 1
return result
def UpperCamelCase_ ( lowerCamelCase : Tuple ) -> Dict:
"""simple docstring"""
return binomial_coefficient(2 * node_count , lowerCamelCase_ ) // (node_count + 1)
def UpperCamelCase_ ( lowerCamelCase : Dict ) -> str:
"""simple docstring"""
if n < 0:
raise ValueError('''factorial() not defined for negative values''' )
__magic_name__ : List[str] = 1
for i in range(1 , n + 1 ):
result *= i
return result
def UpperCamelCase_ ( lowerCamelCase : int ) -> Tuple:
"""simple docstring"""
return catalan_number(lowerCamelCase_ ) * factorial(lowerCamelCase_ )
if __name__ == "__main__":
A = int(input("""Enter the number of nodes: """).strip() or 0)
if node_count <= 0:
raise ValueError("""We need some nodes to work with.""")
print(
F"""Given {node_count} nodes, there are {binary_tree_count(node_count)} """
F"""binary trees and {catalan_number(node_count)} binary search trees."""
)
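# Worked example of the intended output: for 3 nodes the script reports 5 binary search
# trees (the 3rd Catalan number, C(6, 3) // 4 = 20 // 4 = 5) and 5 * 3! = 30 binary trees.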
| 713 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
A = TypeVar("""T""")
A = TypeVar("""U""")
class _UpperCamelCase ( Generic[T, U] ):
"""simple docstring"""
def __init__( self : Any , snake_case : T | None , snake_case : U | None ) -> Tuple:
'''simple docstring'''
__magic_name__ : Optional[Any] = key
__magic_name__ : str = val
__magic_name__ : DoubleLinkedListNode[T, U] | None = None
__magic_name__ : DoubleLinkedListNode[T, U] | None = None
def __repr__( self : str ) -> str:
'''simple docstring'''
return (
f"""Node: key: {self.key}, val: {self.val}, """
f"""has next: {bool(self.next )}, has prev: {bool(self.prev )}"""
)
class _UpperCamelCase ( Generic[T, U] ):
"""simple docstring"""
def __init__( self : str ) -> None:
'''simple docstring'''
__magic_name__ : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(snake_case , snake_case )
__magic_name__ : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(snake_case , snake_case )
__magic_name__ , __magic_name__ : Tuple = self.rear, self.head
def __repr__( self : str ) -> str:
'''simple docstring'''
__magic_name__ : List[str] = ['''DoubleLinkedList''']
__magic_name__ : Optional[Any] = self.head
while node.next is not None:
rep.append(str(snake_case ) )
__magic_name__ : Any = node.next
rep.append(str(self.rear ) )
return ",\n ".join(snake_case )
def _UpperCAmelCase ( self : List[str] , snake_case : DoubleLinkedListNode[T, U] ) -> None:
'''simple docstring'''
__magic_name__ : Tuple = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
__magic_name__ : Dict = node
__magic_name__ : Optional[int] = previous
__magic_name__ : Tuple = node
__magic_name__ : Optional[int] = self.rear
def _UpperCAmelCase ( self : str , snake_case : DoubleLinkedListNode[T, U] ) -> DoubleLinkedListNode[T, U] | None:
'''simple docstring'''
if node.prev is None or node.next is None:
return None
__magic_name__ : str = node.next
__magic_name__ : Dict = node.prev
__magic_name__ : Any = None
__magic_name__ : Dict = None
return node
class _UpperCamelCase ( Generic[T, U] ):
"""simple docstring"""
snake_case_ = {}
def __init__( self : Dict , snake_case : int ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ : DoubleLinkedList[T, U] = DoubleLinkedList()
__magic_name__ : str = capacity
__magic_name__ : Tuple = 0
__magic_name__ : Optional[Any] = 0
__magic_name__ : List[str] = 0
__magic_name__ : dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__( self : Tuple ) -> str:
'''simple docstring'''
return (
f"""CacheInfo(hits={self.hits}, misses={self.miss}, """
f"""capacity={self.capacity}, current size={self.num_keys})"""
)
def __contains__( self : List[str] , snake_case : T ) -> bool:
'''simple docstring'''
return key in self.cache
def _UpperCAmelCase ( self : Optional[int] , snake_case : T ) -> U | None:
'''simple docstring'''
if key in self.cache:
self.hits += 1
__magic_name__ : DoubleLinkedListNode[T, U] = self.cache[key]
__magic_name__ : Optional[Any] = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(snake_case )
return node.val
self.miss += 1
return None
def _UpperCAmelCase ( self : str , snake_case : T , snake_case : U ) -> None:
'''simple docstring'''
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
__magic_name__ : Optional[Any] = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(snake_case ) is not None
                ) # node guaranteed to be in list
del self.cache[first_node.key]
self.num_keys -= 1
__magic_name__ : List[str] = DoubleLinkedListNode(snake_case , snake_case )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
__magic_name__ : Dict = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
__magic_name__ : Any = value
self.list.add(snake_case )
@classmethod
def _UpperCAmelCase ( cls : Tuple , snake_case : int = 128 ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
'''simple docstring'''
def cache_decorator_inner(snake_case : Callable[[T], U] ) -> Callable[..., U]:
def cache_decorator_wrapper(*snake_case : T ) -> U:
if func not in cls.decorator_function_to_instance_map:
__magic_name__ : Any = LRUCache(snake_case )
__magic_name__ : Optional[Any] = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
__magic_name__ : Any = func(*snake_case )
cls.decorator_function_to_instance_map[func].put(args[0] , snake_case )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(snake_case , '''cache_info''' , snake_case ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
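# Usage sketch for the LRU cache above, written against the de-obfuscated names its
# docstrings imply (LRUCache with get/put and a `decorator` classmethod); in this file
# all three classes share one obfuscated name, so treat this as illustrative:
#
#   cache = LRUCache(capacity=2)
#   cache.put(1, "a"); cache.put(2, "b"); cache.put(3, "c")  # evicts key 1
#   print(cache.get(1))  # None (miss)
#   print(cache.get(3))  # "c" (hit)
#
#   @LRUCache.decorator(100)
#   def fib(n: int) -> int:
#       return n if n < 2 else fib(n - 1) + fib(n - 2)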
| 147 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
class _a :
'''simple docstring'''
def __init__( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : list[dict] = []
self.adlist.append(
{'value': '', 'next_states': [], 'fail_state': 0, 'output': []} )
for keyword in keywords:
self.add_keyword(A )
self.set_fail_transitions()
def UpperCamelCase_ ( self, A, A ):
'''simple docstring'''
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = 0
for character in keyword:
SCREAMING_SNAKE_CASE : Any = self.find_next_state(A, A )
if next_state is None:
self.adlist.append(
{
'value': character,
'next_states': [],
'fail_state': 0,
'output': [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
SCREAMING_SNAKE_CASE : List[str] = len(self.adlist ) - 1
else:
SCREAMING_SNAKE_CASE : int = next_state
self.adlist[current_state]["output"].append(A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : deque = deque()
for node in self.adlist[0]["next_states"]:
q.append(A )
SCREAMING_SNAKE_CASE : Optional[Any] = 0
while q:
SCREAMING_SNAKE_CASE : List[Any] = q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(A )
SCREAMING_SNAKE_CASE : Optional[Any] = self.adlist[r]['fail_state']
while (
self.find_next_state(A, self.adlist[child]['value'] ) is None
and state != 0
):
SCREAMING_SNAKE_CASE : str = self.adlist[state]['fail_state']
SCREAMING_SNAKE_CASE : int = self.find_next_state(
A, self.adlist[child]['value'] )
if self.adlist[child]["fail_state"] is None:
SCREAMING_SNAKE_CASE : Optional[int] = 0
SCREAMING_SNAKE_CASE : int = (
self.adlist[child]['output']
+ self.adlist[self.adlist[child]['fail_state']]['output']
)
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : dict = {} # returns a dict with keywords and list of its occurrences
SCREAMING_SNAKE_CASE : Optional[Any] = 0
for i in range(len(A ) ):
while (
self.find_next_state(A, string[i] ) is None
and current_state != 0
):
SCREAMING_SNAKE_CASE : Union[str, Any] = self.adlist[current_state]['fail_state']
SCREAMING_SNAKE_CASE : Tuple = self.find_next_state(A, string[i] )
if next_state is None:
SCREAMING_SNAKE_CASE : Any = 0
else:
SCREAMING_SNAKE_CASE : Dict = next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
SCREAMING_SNAKE_CASE : Dict = []
result[key].append(i - len(A ) + 1 )
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
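# Usage sketch for the Aho-Corasick automaton above. The internal call sites reference
# the real method names (add_keyword, set_fail_transitions, find_next_state); assuming
# the public search method is `search_in` as in the original implementation:
#
#   automaton = _a(["he", "she", "hers", "his"])
#   print(automaton.search_in("ahishers"))
#   # expected: {'his': [1], 'she': [3], 'he': [4], 'hers': [4]}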
| 28 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __init__( self : Optional[Any] , __lowercase : Any , __lowercase : Union[str, Any]=7 , __lowercase : List[str]=3 , __lowercase : List[Any]=18 , __lowercase : str=30 , __lowercase : Optional[Any]=400 , __lowercase : Dict=True , __lowercase : int=None , __lowercase : Tuple=True , __lowercase : Optional[Any]=None , __lowercase : List[str]=True , __lowercase : List[Any]=[0.5, 0.5, 0.5] , __lowercase : Any=[0.5, 0.5, 0.5] , ):
'''simple docstring'''
__a = size if size is not None else {"""shortest_edge""": 18}
__a = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
__a = parent
__a = batch_size
__a = num_channels
__a = image_size
__a = min_resolution
__a = max_resolution
__a = do_resize
__a = size
__a = do_center_crop
__a = crop_size
__a = do_normalize
__a = image_mean
__a = image_std
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : Dict =LevitImageProcessor if is_vision_available() else None
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
__a = LevitImageProcessingTester(self )
@property
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowercase , """image_mean""" ) )
self.assertTrue(hasattr(__lowercase , """image_std""" ) )
self.assertTrue(hasattr(__lowercase , """do_normalize""" ) )
self.assertTrue(hasattr(__lowercase , """do_resize""" ) )
self.assertTrue(hasattr(__lowercase , """do_center_crop""" ) )
self.assertTrue(hasattr(__lowercase , """size""" ) )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
__a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
__a = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase , Image.Image )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__a = image_processing(__lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , numpify=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase , np.ndarray )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__a = image_processing(__lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , torchify=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase , torch.Tensor )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__a = image_processing(__lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 225 | 0 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def lowerCAmelCase__ ( lowerCamelCase__ , lowerCamelCase__=False ) -> Optional[Any]:
A = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""module.blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""module.blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""module.blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""module.blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""module.blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""module.blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""module.blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""module.blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""module.blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""module.blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('module.cls_token', 'vit.embeddings.cls_token'),
('module.patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('module.patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('module.pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('module.norm.weight', 'layernorm.weight'),
('module.norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
A = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def lowerCAmelCase__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ) -> Dict:
for i in range(config.num_hidden_layers ):
if base_model:
A = ''
else:
A = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A = state_dict.pop(f"""module.blocks.{i}.attn.qkv.weight""" )
A = state_dict.pop(f"""module.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
A = in_proj_weight[
: config.hidden_size, :
]
A = in_proj_bias[: config.hidden_size]
A = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A = in_proj_weight[
-config.hidden_size :, :
]
A = in_proj_bias[-config.hidden_size :]
def lowerCAmelCase__ ( lowerCamelCase__ ) -> int:
A = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( lowerCamelCase__ ) -> Optional[int]:
    # projection head is used in the self-supervised pre-training in MSN;
    # for downstream tasks it is not needed.
A = [
'module.fc.fc1.weight',
'module.fc.fc1.bias',
'module.fc.bn1.weight',
'module.fc.bn1.bias',
'module.fc.bn1.running_mean',
'module.fc.bn1.running_var',
'module.fc.bn1.num_batches_tracked',
'module.fc.fc2.weight',
'module.fc.fc2.bias',
'module.fc.bn2.weight',
'module.fc.bn2.bias',
'module.fc.bn2.running_mean',
'module.fc.bn2.running_var',
'module.fc.bn2.num_batches_tracked',
'module.fc.fc3.weight',
'module.fc.fc3.bias',
]
for k in ignore_keys:
state_dict.pop(lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[int]:
A = dct.pop(lowerCamelCase__ )
A = val
def lowerCAmelCase__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]:
A = ViTMSNConfig()
A = 1000
A = 'datasets/huggingface/label-files'
A = 'imagenet-1k-id2label.json'
A = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ ) , 'r' ) )
A = {int(lowerCamelCase__ ): v for k, v in idalabel.items()}
A = idalabel
A = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
A = 384
A = 1536
A = 6
elif "l16" in checkpoint_url:
A = 1024
A = 4096
A = 24
A = 16
A = 0.1
elif "b4" in checkpoint_url:
A = 4
elif "l7" in checkpoint_url:
A = 7
A = 1024
A = 4096
A = 24
A = 16
A = 0.1
A = ViTMSNModel(lowerCamelCase__ )
A = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location='cpu' )['target_encoder']
A = ViTImageProcessor(size=config.image_size )
remove_projection_head(lowerCamelCase__ )
A = create_rename_keys(lowerCamelCase__ , base_model=lowerCamelCase__ )
for src, dest in rename_keys:
rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
read_in_q_k_v(lowerCamelCase__ , lowerCamelCase__ , base_model=lowerCamelCase__ )
model.load_state_dict(lowerCamelCase__ )
model.eval()
A = 'http://images.cocodataset.org/val2017/000000039769.jpg'
A = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw )
    A = ViTImageProcessor(
        size=config.image_size , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD )
A = image_processor(images=lowerCamelCase__ , return_tensors='pt' )
# forward pass
torch.manual_seed(2 )
A = model(**lowerCamelCase__ )
A = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
A = torch.tensor([[-1.09_15, -1.48_76, -1.18_09]] )
elif "b16" in checkpoint_url:
A = torch.tensor([[14.28_89, -18.90_45, 11.72_81]] )
elif "l16" in checkpoint_url:
A = torch.tensor([[41.50_28, -22.86_81, 45.64_75]] )
elif "b4" in checkpoint_url:
A = torch.tensor([[-4.38_68, 5.29_32, -0.41_37]] )
else:
A = torch.tensor([[-0.17_92, -0.64_65, 2.42_63]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , lowerCamelCase__ , atol=1E-4 )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCamelCase__ )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
A = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
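# Example invocation (a sketch: the output directory is an assumption, and the
# checkpoint URL is the default declared in the argparse setup above):
#
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small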
| 109 |
"""simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase__ ( UpperCamelCase ,unittest.TestCase ):
lowerCAmelCase_ : Tuple = LayoutLMTokenizer
lowerCAmelCase_ : Any = LayoutLMTokenizerFast
lowerCAmelCase_ : Optional[int] = True
lowerCAmelCase_ : List[Any] = True
def A_ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
A = [
'[UNK]',
'[CLS]',
'[SEP]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def A_ ( self : str , **snake_case : Tuple ) -> Union[str, Any]:
'''simple docstring'''
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **snake_case )
def A_ ( self : List[str] , snake_case : int ) -> List[Any]:
'''simple docstring'''
A = 'UNwant\u00E9d,running'
A = 'unwanted, running'
return input_text, output_text
def A_ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
A = self.tokenizer_class(self.vocab_file )
A = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(snake_case , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case ) , [7, 4, 5, 10, 8, 9] )
def A_ ( self : Any ) -> List[Any]:
'''simple docstring'''
pass
| 109 | 1 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
lowerCamelCase__ : int = '2.13.1'
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
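# Quick sanity check of the public API re-exported above (the dataset name is illustrative):
#   from datasets import load_dataset
#   ds = load_dataset("imdb", split="train[:1%]")
#   print(ds)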
| 31 |
import operator as op
def solve(post_fix: list[str]) -> int:
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        '^': op.pow,
        '*': op.mul,
        '/': div,
        '+': op.add,
        '-': op.sub,
    }  # operators & their respective operation

    # print table header
    print('Symbol'.center(8 ) , 'Action'.center(12 ) , 'Stack' , sep=' | ' )
    print('-' * (30 + len(post_fix)) )

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x )  # push x onto the stack
            # output in tabular format
            print(x.rjust(8 ) , ('push(' + x + ')').ljust(12 ) , ','.join(stack ) , sep=' | ' )
        else:
            b = stack.pop()  # pop the right operand
            # output in tabular format
            print(''.rjust(8 ) , ('pop(' + b + ')').ljust(12 ) , ','.join(stack ) , sep=' | ' )

            a = stack.pop()  # pop the left operand
            # output in tabular format
            print(''.rjust(8 ) , ('pop(' + a + ')').ljust(12 ) , ','.join(stack ) , sep=' | ' )

            stack.append(
                str(opr[x](int(a ) , int(b ) ) ) )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8 ) , ('push(' + a + x + b + ')').ljust(12 ) , ','.join(stack ) , sep=' | ' , )
    return int(stack[0] )


if __name__ == "__main__":
    Postfix = input('\n\nEnter a Postfix Equation (space separated) = ').split(' ')
    print('\n\tResult = ', solve(Postfix))
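# Hand-checked example: the postfix expression "5 6 9 * +" evaluates to 5 + (6 * 9) = 59,
# and the table printed above shows each push/pop as it happens.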
| 31 | 1 |
import os
def solution() -> int:
    """Return the maximum top-to-bottom path sum through the triangle in triangle.txt."""
    script_dir = os.path.dirname(os.path.realpath(__file__ ) )
    triangle_path = os.path.join(script_dir , """triangle.txt""" )

    with open(triangle_path ) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(""" """ ):
            numbers_from_line.append(int(number ) )
        a.append(numbers_from_line )

    # Top-down DP: each cell accumulates the best path sum reachable from the row above it.
    for i in range(1 , len(a ) ):
        for j in range(len(a[i] ) ):
            number1 = a[i - 1][j] if j != len(a[i - 1] ) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1 , number2 )
    return max(a[-1] )
if __name__ == "__main__":
print(solution())
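# Hand-checked mini example: for the triangle [[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]
# (the Project Euler problem 18 sample), the maximum top-to-bottom path sum is 3 + 7 + 4 + 9 = 23.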
| 379 |
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""

    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"""files""": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, """rb""") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"""files""": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""")
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, """rb""") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"""{file_idx}_{batch_idx}""", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"""Failed to read file '{file}' with error {type(e)}: {e}""")
                    raise
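# Illustrative use of this builder through the public API (the file path is hypothetical):
#   from datasets import load_dataset
#   ds = load_dataset("parquet", data_files={"train": "data/train.parquet"})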
| 379 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
lowerCAmelCase__ =logging.get_logger(__name__)
lowerCAmelCase__ ={
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class CodeGenConfig(PretrainedConfig):
    model_type = '''codegen'''
    attribute_map = {
        '''max_position_embeddings''': '''n_positions''',
        '''hidden_size''': '''n_embd''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }

    def __init__(
        self,
        vocab_size=50_400,
        n_positions=2_048,
        n_ctx=2_048,
        n_embd=4_096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1E-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs )


class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config , task=task , patching_specs=patching_specs , use_past=use_past )
        if not getattr(self._config , '''pad_token_id''' , None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='''inputs''' )
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''past_sequence + sequence'''}
        else:
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''sequence'''}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch

                batch, seqlen = common_inputs['''input_ids'''].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['''past_key_values'''] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs['''attention_mask'''] = common_inputs['''attention_mask''']
        if self.use_past:
            mask_dtype = ordered_inputs['''attention_mask'''].dtype
            ordered_inputs['''attention_mask'''] = torch.cat(
                [ordered_inputs['''attention_mask'''], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
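# Minimal sketch of how these classes fit together (the hyperparameter values below are
# illustrative, not a released checkpoint's exact configuration):
#   config = CodeGenConfig(vocab_size=50_400, n_embd=1_024, n_layer=20, n_head=16)
#   onnx_config = CodeGenOnnxConfig(config, task="default")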
| 482 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation('''swish''' )
        self.assertIsInstance(act , nn.SiLU )
        self.assertEqual(act(torch.tensor(-100 , dtype=torch.float32 ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )

    def test_silu(self):
        act = get_activation('''silu''' )
        self.assertIsInstance(act , nn.SiLU )
        self.assertEqual(act(torch.tensor(-100 , dtype=torch.float32 ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )

    def test_mish(self):
        act = get_activation('''mish''' )
        self.assertIsInstance(act , nn.Mish )
        self.assertEqual(act(torch.tensor(-200 , dtype=torch.float32 ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )

    def test_gelu(self):
        act = get_activation('''gelu''' )
        self.assertIsInstance(act , nn.GELU )
        self.assertEqual(act(torch.tensor(-100 , dtype=torch.float32 ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )
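# Quick check outside the test harness (activation names come from diffusers' registry):
#   act = get_activation("gelu")
#   print(act(torch.tensor([-1.0, 0.0, 1.0])))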
| 482 | 1 |
"""simple docstring"""
from __future__ import annotations
import time
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0s are free cells whereas 1s are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right


class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)

        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            successors = self.get_successors(current_node)

            for node in successors:
                self.node_queue.append(node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent))
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node)

            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest

    doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print('Unidirectional BFS computation time : ', bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time

    print('Bidirectional BFS computation time : ', bd_bfs_time)
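# Why the bidirectional variant helps: plain BFS explores on the order of b**d nodes
# (b = branching factor, d = solution depth), while two frontiers meeting in the middle
# explore roughly 2 * b**(d / 2), which is exponentially fewer as d grows.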
| 285 |
"""simple docstring"""
def solution(n: int = 1_000) -> int:
    """Return the index of the first Fibonacci term to contain n digits."""
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
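# Hand-checked example: the first Fibonacci term with 3 digits is F(12) = 144,
# so solution(3) returns 12.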
| 285 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        """This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. """
        """It takes two arguments named `image` which should be the original image, and `label` which should be a text """
        """describing the elements that should be identified in the segmentation mask. The tool returns the mask."""
    )
    default_checkpoint = """CIDAS/clipseg-rd64-refined"""
    name = """image_segmenter"""
    model_class = CLIPSegForImageSegmentation

    inputs = ["""image""", """text"""]
    outputs = ["""image"""]

    def __init__(self, *args, **kwargs):
        requires_backends(self , ["""vision"""] )
        super().__init__(*args , **kwargs )

    def encode(self, image, label):
        return self.pre_processor(text=[label] , images=[image] , padding=True , return_tensors="""pt""" )

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs ).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8 ) )
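# Hedged usage sketch (instantiation follows the transformers agents convention; the image
# path is hypothetical):
#   from PIL import Image
#   tool = ImageSegmentationTool()
#   mask = tool(image=Image.open("photo.jpg"), label="cat")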
| 99 |
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    '''Return indices i < j with nums[i] + nums[j] == target; assumes nums is sorted ascending.'''
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
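# The two-pointer scan above relies on `nums` being sorted. As an illustrative addition
# (not part of the original module), a one-pass hash map handles unsorted input in O(n):
def two_sum_hashmap(nums: list[int], target: int) -> list[int]:
    seen: dict[int, int] = {}
    for idx, value in enumerate(nums):
        if target - value in seen:
            return [seen[target - value], idx]
        seen[value] = idx
    return []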
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{two_pointer([2, 7, 11, 15], 9) = }""")
| 481 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
_lowerCamelCase : Tuple = {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
),
}
class LongformerConfig(PretrainedConfig):
    model_type = """longformer"""

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30_522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3_072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1E-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        """Constructs LongformerConfig."""
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: PretrainedConfig, task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config , task , patching_specs )
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('global_attention_mask', dynamic_axis),
            ] )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1E-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support the tril operator
        return max(super().default_onnx_opset , 14 )

    def generate_dummy_inputs(
        self, preprocessor, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework=None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs['global_attention_mask'] = torch.zeros_like(inputs['input_ids'] )
        # make every second token global
        inputs['global_attention_mask'][:, ::2] = 1
        return inputs
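# Minimal sketch of generating dummy inputs with this config (the tokenizer choice is
# illustrative):
#   from transformers import AutoTokenizer
#   config = LongformerConfig()
#   onnx_config = LongformerOnnxConfig(config)
#   tok = AutoTokenizer.from_pretrained("allenai/longformer-base-4096")
#   dummy = onnx_config.generate_dummy_inputs(tok, framework=TensorType.PYTORCH)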
| 709 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline(DiffusionPipeline):
    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler )

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=generator , )
        latents = latents.to(self.device )

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps )

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs['eta'] = eta

        for t in self.progress_bar(self.scheduler.timesteps ):
            latent_model_input = self.scheduler.scale_model_input(latents , t )
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction , t , latents , **extra_kwargs ).prev_sample

        # decode the image latents with the VQ-VAE
        image = self.vqvae.decode(latents ).sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image )
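# Hedged usage sketch (the checkpoint name is the usual unconditional LDM example and is
# assumed here):
#   pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
#   image = pipe(num_inference_steps=50).images[0]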
| 324 | 0 |
'''simple docstring'''
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
    raw_image = Image.open(requests.get(img_url , stream=True ).raw ).convert('''RGB''' )

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073) , (0.26862954, 0.26130258, 0.27577711) ),
        ] )
    image = transform(raw_image ).unsqueeze(0 ).to(device )
    return image
def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub('''visual_encoder*''' , '''vision_model.encoder''' , key )
    if "blocks" in key:
        key = re.sub(R'''blocks''' , '''layers''' , key )
    if "attn" in key:
        key = re.sub(R'''attn''' , '''self_attn''' , key )
    if "norm1" in key:
        key = re.sub(R'''norm1''' , '''layer_norm1''' , key )
    if "norm2" in key:
        key = re.sub(R'''norm2''' , '''layer_norm2''' , key )
    if "encoder.norm" in key:
        key = re.sub(R'''encoder.norm''' , '''post_layernorm''' , key )
    if "encoder.patch_embed.proj" in key:
        key = re.sub(R'''encoder.patch_embed.proj''' , '''embeddings.patch_embedding''' , key )
    if "encoder.pos_embed" in key:
        key = re.sub(R'''encoder.pos_embed''' , '''embeddings.position_embedding''' , key )
    if "encoder.cls_token" in key:
        key = re.sub(R'''encoder.cls_token''' , '''embeddings.class_embedding''' , key )
    if "self_attn" in key:
        key = re.sub(R'''self_attn.proj''' , '''self_attn.projection''' , key )
    return key
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path )
    else:
        config = BlipConfig(projection_dim=512 , text_config={} , vision_config={} )
    hf_model = BlipForConditionalGeneration(config ).eval()

    model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'
    pt_model = blip_decoder(pretrained=model_url , image_size=384 , vit='''base''' )
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key )
        renamed_key = rename_key(key )
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict )

    image_size = 384
    image = load_demo_image(image_size=image_size , device='''cpu''' )
    tokenizer = BertTokenizer.from_pretrained('''bert-base-uncased''' )
    input_ids = tokenizer(['''a picture of'''] ).input_ids

    out = hf_model.generate(image , input_ids )
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image )
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path )

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
    )
    vqa_model = blip_vqa(pretrained=model_url , image_size=image_size , vit='''base''' )
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key )
        renamed_key = rename_key(key )
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config )
    hf_vqa_model.load_state_dict(modified_state_dict )

    question = ['How many dogs are in this image?']
    question_input_ids = tokenizer(question , return_tensors='''pt''' ).input_ids

    answer = hf_vqa_model.generate(question_input_ids , image )
    print(tokenizer.decode(answer[0] ) )
    assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '''_vqa''' )

    model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'
    itm_model = blip_itm(pretrained=model_url , image_size=image_size , vit='''base''' )
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key )
        renamed_key = rename_key(key )
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config )

    question = ['A picture of a woman with a dog sitting in a beach']
    question_input_ids = tokenizer(
        question , return_tensors='''pt''' , padding='''max_length''' , truncation=True , max_length=35 , ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict )
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids , image , use_itm_head=True )
    out = hf_itm_model(question_input_ids , image , use_itm_head=False )

    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + '''_itm''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
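# Hedged usage sketch (the script filename below is an assumption):
#   python convert_blip_original_pytorch_to_hf.py --pytorch_dump_folder_path ./blip-base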
| 161 |
def palindromic_string(input_string: str) -> str:
    """Return the longest palindromic substring of `input_string` (Manacher's algorithm)."""
    max_length = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ''
    output_string = ''
    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]
    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0
    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string) )]
    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string) ):
        k = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # does this palindrome end after the previously explored end (that is, r)?
        # if yes, update l and r to span this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i
    return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
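    # Hand-checked example: for the input "abacaba" the longest palindromic substring is the
    # whole string, "abacaba", since it reads the same forwards and backwards.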
| 411 | 0 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"
def __init__( self : List[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[Any]=1_3 , lowerCAmelCase__ : int=7 , lowerCAmelCase__ : str=True , lowerCAmelCase__ : Any=False , lowerCAmelCase__ : str=9_9 , lowerCAmelCase__ : str=3_2 , lowerCAmelCase__ : Union[str, Any]=2 , lowerCAmelCase__ : Optional[Any]=4 , lowerCAmelCase__ : List[Any]=3_7 , lowerCAmelCase__ : List[Any]=0.1 , lowerCAmelCase__ : Tuple=0.1 , lowerCAmelCase__ : Dict=2_0 , lowerCAmelCase__ : str=2 , lowerCAmelCase__ : Dict=1 , lowerCAmelCase__ : Any=0 , lowerCAmelCase__ : List[Any]=4 , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = parent
__SCREAMING_SNAKE_CASE : List[str] = batch_size
__SCREAMING_SNAKE_CASE : str = seq_length
__SCREAMING_SNAKE_CASE : str = is_training
__SCREAMING_SNAKE_CASE : Any = use_labels
__SCREAMING_SNAKE_CASE : Any = vocab_size
__SCREAMING_SNAKE_CASE : List[str] = hidden_size
__SCREAMING_SNAKE_CASE : Optional[Any] = num_hidden_layers
__SCREAMING_SNAKE_CASE : Dict = num_attention_heads
__SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
__SCREAMING_SNAKE_CASE : int = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : str = max_position_embeddings
__SCREAMING_SNAKE_CASE : int = eos_token_id
__SCREAMING_SNAKE_CASE : Dict = pad_token_id
__SCREAMING_SNAKE_CASE : Optional[Any] = bos_token_id
__SCREAMING_SNAKE_CASE : str = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
__SCREAMING_SNAKE_CASE : List[str] = self.attention_window + 2
        # because of padding, `encoder_seq_length` is different from `seq_length`. This is relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
__SCREAMING_SNAKE_CASE : int = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def UpperCamelCase__ ( self : int ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__SCREAMING_SNAKE_CASE : Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__SCREAMING_SNAKE_CASE : Tuple = tf.concat([input_ids, eos_tensor] , axis=1 )
__SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE : List[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
__SCREAMING_SNAKE_CASE : Dict = prepare_led_inputs_dict(__a , __a , __a )
__SCREAMING_SNAKE_CASE : Union[str, Any] = tf.concat(
[tf.zeros_like(__a )[:, :-1], tf.ones_like(__a )[:, -1:]] , axis=-1 , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = global_attention_mask
return config, inputs_dict
def UpperCamelCase__ ( self : List[str] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : int ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = TFLEDModel(config=__a ).get_decoder()
__SCREAMING_SNAKE_CASE : Tuple = inputs_dict["input_ids"]
__SCREAMING_SNAKE_CASE : int = input_ids[:1, :]
__SCREAMING_SNAKE_CASE : List[str] = inputs_dict["attention_mask"][:1, :]
__SCREAMING_SNAKE_CASE : List[Any] = 1
# first forward pass
__SCREAMING_SNAKE_CASE : Any = model(__a , attention_mask=__a , use_cache=__a )
__SCREAMING_SNAKE_CASE : Union[str, Any] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__SCREAMING_SNAKE_CASE : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
__SCREAMING_SNAKE_CASE : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
__SCREAMING_SNAKE_CASE : List[str] = tf.concat([input_ids, next_tokens] , axis=-1 )
__SCREAMING_SNAKE_CASE : Union[str, Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__SCREAMING_SNAKE_CASE : Tuple = model(__a , attention_mask=__a )[0]
__SCREAMING_SNAKE_CASE : int = model(__a , attention_mask=__a , past_key_values=__a )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__SCREAMING_SNAKE_CASE : List[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__SCREAMING_SNAKE_CASE : List[str] = output_from_no_past[:, -3:, random_slice_idx]
__SCREAMING_SNAKE_CASE : Optional[int] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__a , __a , rtol=1E-3 )
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class TFLEDModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFLEDModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LEDConfig )
def UpperCamelCase__ ( self : Optional[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__a )
def UpperCamelCase__ ( self : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE : Optional[int] = tf.zeros_like(inputs_dict["""attention_mask"""] )
__SCREAMING_SNAKE_CASE : Union[str, Any] = 2
__SCREAMING_SNAKE_CASE : str = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["""global_attention_mask"""] , )
__SCREAMING_SNAKE_CASE : Dict = True
__SCREAMING_SNAKE_CASE : str = self.model_tester.seq_length
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(lowerCAmelCase__ : Optional[int] ):
__SCREAMING_SNAKE_CASE : Optional[int] = outputs.decoder_attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(lowerCAmelCase__ : Optional[Any] ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = [t.numpy() for t in outputs.encoder_attentions]
__SCREAMING_SNAKE_CASE : List[Any] = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : Dict = True
__SCREAMING_SNAKE_CASE : Optional[Any] = False
__SCREAMING_SNAKE_CASE : int = False
__SCREAMING_SNAKE_CASE : Optional[int] = model_class(__a )
__SCREAMING_SNAKE_CASE : int = model(self._prepare_for_class(__a , __a ) )
__SCREAMING_SNAKE_CASE : Any = len(__a )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
if self.is_encoder_decoder:
__SCREAMING_SNAKE_CASE : Optional[Any] = model_class(__a )
__SCREAMING_SNAKE_CASE : List[Any] = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_decoder_attentions_output(__a )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__SCREAMING_SNAKE_CASE : int = True
__SCREAMING_SNAKE_CASE : Tuple = model_class(__a )
__SCREAMING_SNAKE_CASE : str = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
# Check attention is always last and order is fine
__SCREAMING_SNAKE_CASE : Any = True
__SCREAMING_SNAKE_CASE : List[str] = True
__SCREAMING_SNAKE_CASE : Tuple = model_class(__a )
__SCREAMING_SNAKE_CASE : int = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__a ) )
self.assertEqual(model.config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
@unittest.skip("""LED keeps using potentially symbolic tensors in conditionals and breaks tracing.""" )
def UpperCamelCase__ ( self : str ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self : Optional[int] ):
"""simple docstring"""
pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst , dtype=tf.int32 )
TOLERANCE = 1E-4
@slow
@require_tf
class TFLEDModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = TFLEDForConditionalGeneration.from_pretrained("""allenai/led-base-16384""" ).led
# change to intended input here
__SCREAMING_SNAKE_CASE : int = _long_tensor([5_1_2 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
__SCREAMING_SNAKE_CASE : Tuple = _long_tensor([1_2_8 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
__SCREAMING_SNAKE_CASE : Optional[Any] = prepare_led_inputs_dict(model.config , __a , __a )
__SCREAMING_SNAKE_CASE : Optional[int] = model(**__a )[0]
__SCREAMING_SNAKE_CASE : Optional[int] = (1, 1_0_2_4, 7_6_8)
self.assertEqual(output.shape , __a )
# change to expected output here
__SCREAMING_SNAKE_CASE : Tuple = tf.convert_to_tensor(
[[2.30_50, 2.82_79, 0.65_31], [-1.84_57, -0.14_55, -3.56_61], [-1.01_86, 0.45_86, -2.20_43]] , )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1E-3 )
def UpperCamelCase__ ( self : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = TFLEDForConditionalGeneration.from_pretrained("""allenai/led-base-16384""" )
# change to intended input here
__SCREAMING_SNAKE_CASE : Optional[int] = _long_tensor([5_1_2 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
__SCREAMING_SNAKE_CASE : List[str] = _long_tensor([1_2_8 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
__SCREAMING_SNAKE_CASE : Optional[Any] = prepare_led_inputs_dict(model.config , __a , __a )
__SCREAMING_SNAKE_CASE : Union[str, Any] = model(**__a )[0]
__SCREAMING_SNAKE_CASE : int = (1, 1_0_2_4, model.config.vocab_size)
self.assertEqual(output.shape , __a )
# change to expected output here
__SCREAMING_SNAKE_CASE : Optional[int] = tf.convert_to_tensor(
[[33.65_07, 6.45_72, 16.80_89], [5.87_39, -2.42_38, 11.29_02], [-3.21_39, -4.31_49, 4.27_83]] , )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1E-3 , rtol=1E-3 )
| 707 |
'''simple docstring'''
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    """Descriptor that mimics @property but caches output in a member variable."""

    def __get__( self , obj , objtype=None ):
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("""unreadable attribute""" )
        attr = """__cached_""" + self.fget.__name__
        cached = getattr(obj , attr , None )
        if cached is None:
            cached = self.fget(obj )
            setattr(obj , attr , cached )
        return cached
def strtobool(val):
    """Convert a string representation of truth to 1 (true) or 0 (false)."""
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(F"invalid truth value {val!r}" )
def lowerCAmelCase_ ( _lowerCamelCase: Optional[int] ):
if is_torch_fx_proxy(_lowerCamelCase ):
return True
if is_torch_available():
import torch
if isinstance(_lowerCamelCase , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(_lowerCamelCase , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(_lowerCamelCase , (jnp.ndarray, Tracer) ):
return True
return isinstance(_lowerCamelCase , np.ndarray )
def lowerCAmelCase_ ( _lowerCamelCase: Optional[Any] ):
return isinstance(_lowerCamelCase , np.ndarray )
def lowerCAmelCase_ ( _lowerCamelCase: Dict ):
return _is_numpy(_lowerCamelCase )
def lowerCAmelCase_ ( _lowerCamelCase: Any ):
import torch
return isinstance(_lowerCamelCase , torch.Tensor )
def lowerCAmelCase_ ( _lowerCamelCase: str ):
return False if not is_torch_available() else _is_torch(_lowerCamelCase )
def lowerCAmelCase_ ( _lowerCamelCase: Dict ):
import torch
return isinstance(_lowerCamelCase , torch.device )
def lowerCAmelCase_ ( _lowerCamelCase: Any ):
return False if not is_torch_available() else _is_torch_device(_lowerCamelCase )
def lowerCAmelCase_ ( _lowerCamelCase: Any ):
import torch
if isinstance(_lowerCamelCase , _lowerCamelCase ):
if hasattr(_lowerCamelCase , _lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Tuple = getattr(_lowerCamelCase , _lowerCamelCase )
else:
return False
return isinstance(_lowerCamelCase , torch.dtype )
def lowerCAmelCase_ ( _lowerCamelCase: Optional[Any] ):
return False if not is_torch_available() else _is_torch_dtype(_lowerCamelCase )
def lowerCAmelCase_ ( _lowerCamelCase: str ):
import tensorflow as tf
return isinstance(_lowerCamelCase , tf.Tensor )
def lowerCAmelCase_ ( _lowerCamelCase: int ):
return False if not is_tf_available() else _is_tensorflow(_lowerCamelCase )
def lowerCAmelCase_ ( _lowerCamelCase: Any ):
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(_lowerCamelCase , """is_symbolic_tensor""" ):
return tf.is_symbolic_tensor(_lowerCamelCase )
return type(_lowerCamelCase ) == tf.Tensor
def lowerCAmelCase_ ( _lowerCamelCase: Any ):
return False if not is_tf_available() else _is_tf_symbolic_tensor(_lowerCamelCase )
def lowerCAmelCase_ ( _lowerCamelCase: Any ):
import jax.numpy as jnp # noqa: F811
return isinstance(_lowerCamelCase , jnp.ndarray )
def lowerCAmelCase_ ( _lowerCamelCase: List[str] ):
return False if not is_flax_available() else _is_jax(_lowerCamelCase )
def lowerCAmelCase_ ( _lowerCamelCase: int ):
if isinstance(_lowerCamelCase , (dict, UserDict) ):
return {k: to_py_obj(_lowerCamelCase ) for k, v in obj.items()}
elif isinstance(_lowerCamelCase , (list, tuple) ):
return [to_py_obj(_lowerCamelCase ) for o in obj]
elif is_tf_tensor(_lowerCamelCase ):
return obj.numpy().tolist()
elif is_torch_tensor(_lowerCamelCase ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(_lowerCamelCase ):
return np.asarray(_lowerCamelCase ).tolist()
elif isinstance(_lowerCamelCase , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def lowerCAmelCase_ ( _lowerCamelCase: Any ):
if isinstance(_lowerCamelCase , (dict, UserDict) ):
return {k: to_numpy(_lowerCamelCase ) for k, v in obj.items()}
elif isinstance(_lowerCamelCase , (list, tuple) ):
return np.array(_lowerCamelCase )
elif is_tf_tensor(_lowerCamelCase ):
return obj.numpy()
elif is_torch_tensor(_lowerCamelCase ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(_lowerCamelCase ):
return np.asarray(_lowerCamelCase )
else:
return obj
class ModelOutput(OrderedDict):
    """Base class for all model outputs as dataclass, with a dict-like interface."""

    def __post_init__(self):
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self):
        """Convert self to a tuple containing all the attributes/keys that are not `None`."""
        return tuple(self[k] for k in self.keys())
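# --- illustration (added) ------------------------------------------------------
# A hedged sketch of the `ModelOutput` contract: fields can be read as
# attributes, string keys, or integer indices, and `None` fields are skipped.
def _demo_model_output():
    from dataclasses import dataclass as _dataclass

    @_dataclass
    class ToyOutput(ModelOutput):
        logits: list = None
        hidden_states: list = None

    out = ToyOutput(logits=[1, 2, 3])
    assert out.logits == out["logits"] == out[0] == [1, 2, 3]
    assert out.to_tuple() == ([1, 2, 3],)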
class ExplicitEnum(str, Enum):
    """Enum with more explicit error message for missing values."""

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )


class PaddingStrategy(ExplicitEnum):
    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"
class ContextManagers:
    """Wrapper for `contextlib.ExitStack` which enters a collection of context managers."""

    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
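# --- illustration (added) ------------------------------------------------------
# A hedged sketch of `ContextManagers`: every manager in the list is entered on
# `__enter__` and closed in reverse order on `__exit__`, just like a manual
# `contextlib.ExitStack`. The file names below are hypothetical.
#
#     with ContextManagers([open("a.txt", "w"), open("b.txt", "w")]):
#         ...  # both files are open here; both are closed afterwards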
def can_return_loss(model_class):
    """Check if a given model can return loss, based on its signature."""
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False


def find_labels(model_class):
    """Find the labels used by a given model, based on its signature."""
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    """Flatten a nested dict into a single level dict."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
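# --- illustration (added) ------------------------------------------------------
# A small sketch of what `flatten_dict` produces: nested keys are joined with
# the delimiter into a single-level mapping.
def _demo_flatten_dict():
    nested = {"a": 1, "b": {"c": 2, "d": {"e": 3}}}
    assert flatten_dict(nested) == {"a": 1, "b.c": 2, "b.d.e": 3}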
@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose(array, axes=None):
    """Framework-agnostic version of `numpy.transpose` that works on numpy, torch, TF and Jax tensors."""
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        import jax.numpy as jnp

        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")


def reshape(array, newshape):
    """Framework-agnostic version of `numpy.reshape`."""
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        import jax.numpy as jnp

        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")


def squeeze(array, axis=None):
    """Framework-agnostic version of `numpy.squeeze`."""
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        import jax.numpy as jnp

        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")


def expand_dims(array, axis):
    """Framework-agnostic version of `numpy.expand_dims`."""
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        import jax.numpy as jnp

        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")


def tensor_size(array):
    """Framework-agnostic version of `numpy.size`."""
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")
def add_model_info_to_auto_map(auto_map, repo_id):
    """Adds the information of the repo_id to a given auto map."""
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"
    return auto_map


def infer_framework(model_class):
    """Infers the framework of a given model class by walking its MRO, without relying on isinstance()."""
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    raise TypeError(f"Could not infer framework from class {model_class}.")
| 178 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
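# --- illustration (added) ------------------------------------------------------
# A hedged usage sketch (requires the vision extra, network access and the
# Salesforce checkpoint above; the image path is hypothetical):
#
#     from PIL import Image
#     tool = ImageCaptioningTool()
#     caption = tool(Image.open("photo.jpg"))
#     print(caption)  # e.g. "a dog sitting on a couch"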
| 26 |
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_token = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_token)

        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)

        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)
    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        tokens = tokenizer.encode(input_text)
        output_text = tokenizer.decode(tokens)
        self.assertEqual(output_text, expected_text)
    @slow
    def test_prefix_input(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"
        x_token_1 = tokenizer.encode(prefix_text + input_text)
        x_token_2 = tokenizer.encode("", prefix_text=prefix_text + input_text)
        x_token_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        x_rebuilt_1 = tokenizer.decode(x_token_1)
        x_rebuilt_2 = tokenizer.decode(x_token_2)
        x_rebuilt_3 = tokenizer.decode(x_token_3)
        self.assertEqual(x_rebuilt_1, expected_text)
        self.assertEqual(x_rebuilt_2, expected_text)
        self.assertEqual(x_rebuilt_3, expected_text)
    @slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"

        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        x_token_1 = tokenizer(prefix_text + input_text).token_type_ids
        x_token_2 = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids
        x_token_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(x_token_1, expected_mask_1)
        self.assertListEqual(x_token_2, expected_mask_2)
        self.assertListEqual(x_token_3, expected_mask_3)
    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        x_token_1 = tokenizer.encode("あンいワ")
        x_token_2 = tokenizer.encode("", prefix_text="あンいワ")
        x_token_3 = tokenizer.encode("いワ", prefix_text="あン")

        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_1[1], x_token_2[-1])  # SEG token
        self.assertEqual(x_token_1[1], x_token_3[3])  # SEG token
    @slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        input_pairs = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)

        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs)
        self.assertListEqual(x_token.token_type_ids, expected_typeids)
        self.assertListEqual(x_token.attention_mask, expected_attmask)
        self.assertListEqual(x_token_2.input_ids, expected_outputs)
        self.assertListEqual(x_token_2.token_type_ids, expected_typeids)
        self.assertListEqual(x_token_2.attention_mask, expected_attmask)
    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
| 225 | 0 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
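# --- illustration (added) ------------------------------------------------------
# A hedged usage sketch (requires network access; the checkpoint name is an
# assumption): text goes through the XLM-R tokenizer, images through the CLIP
# image processor, and the two outputs are merged into one `BatchEncoding`.
#
#     processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
#     batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#     # batch now holds input_ids / attention_mask plus pixel_values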
| 41 |
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
'''b0''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1_408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1_536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1_792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2_048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2_304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2_560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    # EfficientNet original framework -> HuggingFace framework key mapping
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
a_ =block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
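# --- illustration (added) ------------------------------------------------------
# A small sketch of the layout conversion above: Keras stores conv kernels as
# (H, W, C_in, C_out) while PyTorch expects (C_out, C_in, H, W); depthwise
# kernels go from (H, W, channels, multiplier) to (channels, multiplier, H, W).
def _demo_kernel_permute():
    tf_kernel = np.zeros((3, 3, 16, 32), dtype=np.float32)  # H, W, C_in, C_out
    pt_kernel = torch.from_numpy(tf_kernel).permute(3, 2, 0, 1)
    assert tuple(pt_kernel.shape) == (32, 16, 3, 3)  # C_out, C_in, H, W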
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    """
    Copy/paste/tweak the original EfficientNet weights to our EfficientNet structure.
    """
    # Load original model
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match  -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 41 | 1 |
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the glue dataset, using "bert-base-cased" as the tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
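# --- illustration (added) ------------------------------------------------------
# A hedged launch sketch: the same script runs unchanged on one GPU, several
# GPUs or TPUs depending on how it is started, e.g.
#
#     python nlp_example.py --mixed_precision fp16   # single process
#     accelerate launch nlp_example.py               # distributed, after `accelerate config`
#
# (the file name `nlp_example.py` is a placeholder for wherever this script lives).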
| 121 |
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
usage_doc = 'Usage of script: script_name <size_of_canvas:int>'

choice = [0] * 100 + [1] * 10
random.shuffle(choice)
def create_canvas(size: int) -> list[list[bool]]:
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas
def seed(canvas: list[list[bool]]) -> None:
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))
def run(canvas: list[list[bool]]) -> list[list[bool]]:
    """Runs the rules of the game through all points, producing the next generation."""
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2]
            )

    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas = current_canvas.tolist()
    return return_canvas
def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1

    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1

    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True

    return state
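# --- illustration (added) ------------------------------------------------------
# A quick sketch of the rules on the classic "blinker": a vertical line of three
# live cells becomes a horizontal line after one step (away from the borders,
# where this implementation's neighbourhood slices are truncated).
def _demo_blinker():
    c = create_canvas(5)
    c[1][2] = c[2][2] = c[3][2] = True  # vertical blinker
    nxt = run(c)
    assert [nxt[2][1], nxt[2][2], nxt[2][3]] == [True, True, True]
    assert not nxt[1][2] and not nxt[3][2]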
if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)

    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(['w', 'k'])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
| 121 | 1 |
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    """
    Output class for semantic guidance Stable Diffusion pipelines: the generated images plus per-image
    flags indicating whether the safety checker flagged likely nsfw content (or `None` if not run).
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 314 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir('''fixtures''')
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir('''fixtures/dummy_feature_extractor_config.json''')
SAMPLE_CONFIG = get_tests_dir('''fixtures/dummy-config.json''')
class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h')
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)
    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()

            config_dict.pop('feature_extractor_type')
            config = Wav2Vec2FeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue('_processor_class' not in dict_as_saved)

            self.assertIsInstance(config, Wav2Vec2FeatureExtractor)
    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, 'bert-base is not a local folder and is not a valid model identifier'
        ):
            _ = AutoFeatureExtractor.from_pretrained('bert-base')

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'
        ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision='aaaaaa')

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.',
        ):
            _ = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model')
    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor'
            )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor', trust_remote_code=False
            )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            'hf-internal-testing/test_dynamic_feature_extractor', trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, 'NewFeatureExtractor')

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, 'NewFeatureExtractor')
    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register('custom', CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register('custom', CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor'
            )
            self.assertEqual(feature_extractor.__class__.__name__, 'NewFeatureExtractor')
            self.assertTrue(feature_extractor.is_local)

            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor', trust_remote_code=False
            )
            self.assertEqual(feature_extractor.__class__.__name__, 'NewFeatureExtractor')
            self.assertTrue(feature_extractor.is_local)

            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor', trust_remote_code=True
            )
            self.assertEqual(feature_extractor.__class__.__name__, 'NewFeatureExtractor')
            self.assertTrue(not hasattr(feature_extractor, 'is_local'))

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 314 | 1 |
'''simple docstring'''
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
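# --- illustration (added) ------------------------------------------------------
# A hedged sketch of the chaining behaviour: each slot holds a deque and new
# values are pushed on the left, so a slot that receives 10 and then 20 reads
# deque([20, 10]). Constructor and insert arguments follow the `HashTable` base
# class, which is not shown in this file.
#
#     table = HashTableWithLinkedList(3, charge_factor=2)
#     table.insert_data(10)
#     table.insert_data(20)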
| 390 |
'''simple docstring'''
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
"word_embeddings_layernorm.weight",
"word_embeddings_layernorm.bias",
"input_layernorm.weight",
"input_layernorm.bias",
"post_attention_layernorm.weight",
"post_attention_layernorm.bias",
"self_attention.dense.bias",
"mlp.dense_4h_to_h.bias",
"ln_f.weight",
"ln_f.bias",
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
"mlp.dense_4h_to_h.weight",
"self_attention.dense.weight",
]
def layer_name_mapping(key, file):
    """Convert Megatron-DeepSpeed TP/PP weights mapping in transformers PP only"""
    # Handle first and last layers
    layer_rename_map = {
        '''word_embeddings.weight''': '''word_embeddings.weight''',
        '''word_embeddings.norm.weight''': '''word_embeddings_layernorm.weight''',
        '''word_embeddings.norm.bias''': '''word_embeddings_layernorm.bias''',
        '''weight''': '''ln_f.weight''',
        '''bias''': '''ln_f.bias''',
    }

    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks
    layer_number = int(re.match(R'''.*layer_(\d*).*''', file)[1])
    layer_number -= 3
    return F"""h.{layer_number}.""" + key
def get_dtype_size(dtype):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(R'''[^\d](\d+)$''', str(dtype))
    if bit_search is None:
        raise ValueError(F"""`dtype` is not a valid dtype: {dtype}.""")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
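# --- illustration (added) ------------------------------------------------------
# A quick sketch of `get_dtype_size`: it parses the trailing bit-width out of
# the dtype's repr (e.g. "torch.float32" -> 32) and converts it to bytes.
def _demo_get_dtype_size():
    assert get_dtype_size(torch.float32) == 4
    assert get_dtype_size(torch.bfloat16) == 2
    assert get_dtype_size(torch.int8) == 1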
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith('''layer''') and "model_00" in s, file_names))

        index_dict = {'''weight_map''': {}, '''metadata''': {}}
        total_size = 0

        missing_keys = None

        config = BloomConfig()

        for j, file in enumerate(file_names):
            print('''Processing file: {}'''.format(file))
            tensors = None

            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace('''model_00''', F"""model_0{i}""")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location='''cpu''')

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    '''pytorch_model_{}-of-{}.bin'''.format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = '''pytorch_model_{}-of-{}.bin'''.format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, '''w''', encoding='''utf-8''') as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + '''.index.json'''), '''w''', encoding='''utf-8''') as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + '''\n'''
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith('''layer''') and "model_00" in s, file_names))

        missing_keys = None
        for i, file in enumerate(file_names):
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace('''model_00''', F"""model_0{i}""")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location='''cpu''')

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, F"""The keys {other_keys.unexpected_keys} are unexpected"""
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))

        assert not missing_keys, F"""The keys {missing_keys} are missing"""

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
        print(F"""Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}""")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(F"""Save configuration file to {pytorch_config_dump_path}""")
        with open(pytorch_config_dump_path, '''w''', encoding='''utf-8''') as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bloom_checkpoint_path",
default=None,
type=str,
required=True,
help="Path to the Megatron-LM checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--bloom_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--shard_model",
action="store_true",
help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint",
)
parser.add_argument(
"--pretraining_tp",
default=4,
type=int,
help="Pretraining TP rank that has been used when training the model in Megatron-LM \n",
)
    args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 390 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_rag": ["RagConfig"],
"retrieval_rag": ["RagRetriever"],
"tokenization_rag": ["RagTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
"RagModel",
"RagPreTrainedModel",
"RagSequenceForGeneration",
"RagTokenForGeneration",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
"TFRagModel",
"TFRagPreTrainedModel",
"TFRagSequenceForGeneration",
"TFRagTokenForGeneration",
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 423 |
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student, n_teacher):
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
F'no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first'
F' {n_student}' )
        return list(range(n_student))
def get_layers_to_supervise(n_student, n_teacher) -> List[int]:
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
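

# Quick check (added): the values follow directly from the tables above, e.g. a
# 12-layer teacher distilled into a 3-layer student copies layers [0, 6, 11]
# and supervises layers [3, 7, 11].
def _example_layer_selection():
    assert pick_layers_to_copy(n_student=3, n_teacher=12) == [0, 6, 11]
    assert get_layers_to_supervise(n_student=3, n_teacher=12) == [3, 7, 11]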
def create_student_by_copying_alternating_layers(teacher: Union[str, PreTrainedModel], save_path: Union[str, Path] = "student", e: Union[int, None] = None, d: Union[int, None] = None, copy_first_teacher_layers=False, e_layers_to_copy=None, d_layers_to_copy=None, **extra_config_kwargs) -> Tuple[PreTrainedModel, List[int], List[int]]:
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()
    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)
    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher key.
    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}")
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy
    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy: List[int] = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy: List[int] = pick_layers_to_copy(d, teacher_d)
try:
        if hasattr(student, "prophetnet"):
            # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}")
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
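

# Usage sketch (added; the file name "make_student.py" is an assumption --
# fire maps CLI arguments onto create_student_by_copying_alternating_layers):
#     python make_student.py facebook/bart-large-cnn ./student --e 6 --d 3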
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 423 | 1 |
'''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
pass
@input.register
class BulletMenu:
    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "
    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)
    def print_choice(self, index: int):
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f" {self.choices[index]}")
reset_cursor()
    def move_direction(self, direction: Direction, num_spaces: int = 1):
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)
@input.mark(KEYMAP['up'] )
    def move_up(self):
        self.move_direction(Direction.UP)
@input.mark(KEYMAP['down'] )
    def move_down(self):
        self.move_direction(Direction.DOWN)
@input.mark(KEYMAP['newline'] )
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position
@input.mark(KEYMAP['interrupt'] )
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return
    def run(self, default_choice: int = 0):
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
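

# Usage sketch (added; needs an interactive terminal, values are illustrative):
if __name__ == "__main__":
    menu = BulletMenu("Which mixed precision mode?", ["no", "fp16", "bf16"])
    print(f"Selected index: {menu.run(default_choice=0)}")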
| 284 |
'''simple docstring'''
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))
def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
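

# Worked example (added): sigmoid(0.0) == 0.5 and softmax(np.array([[1.0, 1.0]]))
# -> [[0.5, 0.5]]; subtracting the row max above only improves numerical
# stability and does not change the result.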
class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
    PIPELINE_INIT_ARGS , R'''
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `"default"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `"sigmoid"`: Applies the sigmoid function on the output.
- `"softmax"`: Applies the softmax function on the output.
- `"none"`: Does not apply any function on the output.
''' , )
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING)
    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        preprocess_params = tokenizer_kwargs
        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores
        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.", UserWarning, )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1
        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]
        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                'The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.')
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)
    def _forward(self, model_inputs):
        return self.model(**model_inputs)
    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE
        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()
        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")
        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}
        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
return dict_scores
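

# Usage sketch (added; the checkpoint is only an example -- any
# text-classification model works here):
#     from transformers import pipeline
#     classifier = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
#     classifier("This is great!")  # -> [{'label': 'POSITIVE', 'score': ...}]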
| 284 | 1 |
def naive_pattern_search(s: str, pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3]
print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
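    # Added check: matches may overlap, and the scan above is
    # O(len(s) * len(pattern)) in the worst case.
    assert naive_pattern_search("AAAAA", "AA") == [0, 1, 2, 3]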
| 701 |
from __future__ import annotations
END = "#"


class Trie:
    def __init__(self):
        self._trie = {}

    def insert_word(self, text):
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix):
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d):
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)
trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)
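

# Expected behaviour (added; follows from the word list above):
#     autocomplete_using_trie("de") -> ('depart ', 'detergent ', 'deer ', 'deal ')
# The trailing space marks a completed word (END maps to " " in Trie._elements).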
def main() -> None:
    print(autocomplete_using_trie("de"))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 86 | 0 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"])
        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401
        self.test_metrics = test_metrics
@require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)
@require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)
@require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()
@require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
| 511 |
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
}
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
't5-small': 5_12,
't5-base': 5_12,
't5-large': 5_12,
't5-3b': 5_12,
't5-11b': 5_12,
}
SPIECE_UNDERLINE = "▁"


class T5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, eos_token="</s>", unk_token="<unk>", pad_token="<pad>", extra_ids=100, additional_special_tokens=None, sp_model_kwargs=None, legacy=True, **kwargs):
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens")
        if legacy:
            logger.warning_once(
                f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565")
        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=extra_ids, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, legacy=legacy, **kwargs, )
        self.vocab_file = vocab_file
        self._extra_ids = extra_ids
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
@staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'This tokenizer was incorrectly instantiated with a model max length of'
f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
f" {pretrained_model_name_or_path} automatically truncating your input to"
f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.", FutureWarning, )
return max_model_length
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)) is not None, self.additional_special_tokens)))

    def get_sentinel_token_ids(self):
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]
    def _add_eos_if_not_present(self, token_ids):
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
' eos tokens being added.' )
return token_ids
else:
return token_ids + [self.eos_token_id]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file )
    def tokenize(self, text: "TextInput", **kwargs):
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]
        tokens = self.sp_model.encode(text, out_type=str)
        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens
    def _convert_token_to_id(self, token):
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token
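
    # Worked example (added): with extra_ids=100, '<extra_id_0>' maps to the
    # *last* id in the vocab, i.e. _convert_token_to_id('<extra_id_0>') ==
    # self.vocab_size - 1, and '<extra_id_99>' == self.vocab_size - 100.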
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
return (out_vocab_file,)
| 511 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/mbart-large-en-ro""": (
"""https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"""
),
"""facebook/mbart-large-cc25""": (
"""https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/mbart-large-en-ro""": """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json""",
"""facebook/mbart-large-cc25""": """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/mbart-large-en-ro""": 1_0_2_4,
"""facebook/mbart-large-cc25""": 1_0_2_4,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]


class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens])
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "en_XX", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro_RO", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )
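
    # Worked example (added): for src_lang="en_XX" an encoded source sequence is
    # `tokens + [</s>, en_XX]` -- mBART places the language code *after* eos,
    # which is exactly what the suffix_tokens above encode.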
    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
return (out_vocab_file,)
| 450 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)
ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)
    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)
    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass
        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass
    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)
    @classmethod
    def _from_pretrained(cls, model_id: Union[str, Path], use_auth_token: Optional[Union[bool, str, None]] = None, revision: Optional[Union[str, None]] = None, force_download: bool = False, cache_dir: Optional[str] = None, file_name: Optional[str] = None, provider: Optional[str] = None, sess_options: Optional["ort.SessionOptions"] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options)
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id, filename=model_file_name, use_auth_token=use_auth_token, revision=revision, cache_dir=cache_dir, force_download=force_download, )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)
    @classmethod
    def from_pretrained(cls, model_id: Union[str, Path], force_download: bool = True, use_auth_token: Optional[str] = None, cache_dir: Optional[str] = None, **model_kwargs):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")
        return cls._from_pretrained(
            model_id=model_id, revision=revision, cache_dir=cache_dir, force_download=force_download, use_auth_token=use_auth_token, **model_kwargs, )
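

# Usage sketch (added; the directory, file name, and input name are
# illustrative assumptions):
#     m = OnnxRuntimeModel.from_pretrained("./onnx_dir", file_name="model.onnx")
#     out = m(input_ids=np.ones((1, 8), dtype=np.int64))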
| 450 | 1 |
"""simple docstring"""
from __future__ import annotations
Path = list[tuple[int, int]]
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]
delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, g_cost: float, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost
class GreedyBestFirst:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent))
        return successors
    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
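

# Note (added): f_cost above is the heuristic alone (greedy best-first), not
# g_cost + heuristic as in A*, so the path found is not guaranteed to be the
# shortest one -- only the one that always looks closest to the goal.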
if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    print("------")
    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2
        for elem in grid:
            print(elem)
| 346 |
'''simple docstring'''
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)


class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1


@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    # Used in the return key of the pipeline.
    return_name = "generated"
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING)
    def _sanitize_parameters(self, return_tensors=None, return_type=None, clean_up_tokenization_spaces=None, truncation=None, stop_sequence=None, **generate_kwargs):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim.")
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        return True
    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`")
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result
    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()
        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}
    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids, skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces, )
                }
            records.append(record)
        return records
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
"""simple docstring"""
if max_length < min_length:
logger.warning(f'''Your min_length={min_length} must be inferior than your max_length={max_length}.''' )
if input_length < max_length:
logger.warning(
f'''Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '''
'a summarization task, where outputs shorter than the input are typically wanted, you might '
f'''consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})''' )
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
if input_length > 0.9 * max_length:
logger.warning(
f'''Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '''
'increasing your max_length manually, e.g. translator(\'...\', max_length=400)' )
return True
    def _parse_and_tokenize(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang)
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)
    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params
    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
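

# Usage sketch (added; the task string "translation_en_to_fr" is parsed for
# src/tgt languages exactly as in _sanitize_parameters above):
#     from transformers import pipeline
#     translator = pipeline("translation_en_to_fr", model="t5-small")
#     translator("How old are you?")  # -> [{'translation_text': ...}]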
| 119 | 0 |
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
_DESCRIPTION = """\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
\"raw_values\" : Returns a full set of errors in case of multioutput input.
\"uniform_average\" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric(\"mse\")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{'mse': 0.6123724356957945}
If you're using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mse': array([0.41666667, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase_ ( datasets.Metric ):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"""
] , )
    def _get_feature_types(self):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("""float""" ) ),
"references": datasets.Sequence(datasets.Value("""float""" ) ),
}
else:
return {
"predictions": datasets.Value("""float""" ),
"references": datasets.Value("""float""" ),
}
    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared)
return {"mse": mse}
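# Sanity-check sketch (illustrative addition): MSE is the mean of squared
# residuals, so the metric above should agree with a direct NumPy computation.
if __name__ == "__main__":
    import numpy as np

    predictions = np.array([2.5, 0.0, 2.0, 8.0])
    references = np.array([3.0, -0.5, 2.0, 7.0])
    mse = float(np.mean((predictions - references) ** 2))
    print(mse)           # 0.375, matching the docstring example
    print(np.sqrt(mse))  # RMSE, matching squared=False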
| 672 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.array:
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
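# Worked example (illustrative addition): for y' = y with y(0) = 1, Heun's
# method above should track e^x, with global error of order O(step_size**2).
if __name__ == "__main__":
    y = euler_modified(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)
    print(y[-1])               # ~2.714, close to e ~= 2.71828
    print(float(np.exp(1.0)))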
| 672 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
"tokenization_deberta": ["DebertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
"DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"DebertaForMaskedLM",
"DebertaForQuestionAnswering",
"DebertaForSequenceClassification",
"DebertaForTokenClassification",
"DebertaModel",
"DebertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
"TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDebertaForMaskedLM",
"TFDebertaForQuestionAnswering",
"TFDebertaForSequenceClassification",
"TFDebertaForTokenClassification",
"TFDebertaModel",
"TFDebertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
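# Illustrative sketch (added; not part of the original file) of the idea behind
# the lazy-module pattern above: attribute access triggers the real import, so
# importing the package stays cheap until a symbol is actually used.
if __name__ == "__main__":
    import importlib

    class _LazyDemo:
        def __init__(self, module_name):
            self._module_name = module_name
            self._module = None

        def __getattr__(self, name):
            # Import is deferred until the first attribute lookup.
            if self._module is None:
                self._module = importlib.import_module(self._module_name)
            return getattr(self._module, name)

    lazy_json = _LazyDemo("json")
    print(lazy_json.dumps({"lazy": True}))  # json is imported only here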
| 109 |
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
},
"emoji_file": {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"abeja/gpt-neox-japanese-2.7b": 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token, pad_token=pad_token, bos_token=bos_token, eos_token=eos_token, do_clean_text=do_clean_text, **kwargs,)
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find an emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji)
    @property
    def vocab_size(self):
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation"):
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"])
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!")
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"""(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)""")
        self.content_repatter2 = re.compile(r"""[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*""")
        self.content_repatter3 = re.compile(r"""[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}""")
        self.content_repatter4 = re.compile(
            r"""([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""")
        self.content_repatter5 = re.compile(
            r"""(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""")
        self.content_repatter6 = re.compile(
            r"""((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*""")
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
def __len__( self : List[str] ):
'''simple docstring'''
return len(self.ids_to_tokens )
    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace("\u3000", "<SP>")  # full-width space
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
    def convert_id_to_token(self, index, breakline="\n"):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
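# Illustrative sketch (added) of the byte fallback used above: characters that
# are missing from the vocabulary are emitted as one <|byteN|> token per UTF-8
# byte and reassembled with bytearray(...).decode(...) when detokenizing.
if __name__ == "__main__":
    oov = "鮨"  # assume this character is out of vocabulary
    byte_tokens = ["<|byte%d|>" % b for b in oov.encode("utf-8")]
    print(byte_tokens)  # ['<|byte233|>', '<|byte174|>', '<|byte168|>']
    restored = bytearray(int(t[6:-2]) for t in byte_tokens).decode("utf-8", errors="replace")
    print(restored == oov)  # True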
| 109 | 1 |
def abbr(a: str, b: str) -> bool:
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
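# Worked example (illustrative addition): "daBcd" matches "ABC" because the
# lowercase d/c/d may be dropped while a/B/c are capitalised to A/B/C; "dBcd"
# fails because no character can supply the leading 'A'.
if __name__ == "__main__":
    print(abbr("daBcd", "ABC"))  # True
    print(abbr("dBcd", "ABC"))   # False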
| 416 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)
def __len__( self : Tuple ) -> int:
'''simple docstring'''
return len(self.__components )
def __str__( self : List[Any] ) -> str:
'''simple docstring'''
        return "(" + ",".join(map(str, self.__components)) + ")"
    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception('''must have the same size''')

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception('''must have the same size''')
@overload
def __mul__( self : List[Any] , UpperCAmelCase_ : float ) -> Vector:
'''simple docstring'''
...
@overload
def __mul__( self : Dict , UpperCAmelCase_ : Vector ) -> float:
'''simple docstring'''
...
    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception('''invalid operand!''')
    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception('''index out of range''')

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception('''Vector is empty''')
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)


def zero_vector(dimension: int) -> Vector:
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
class Matrix:
    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception('''matrix must have the same dimension!''')

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception('''matrices must have the same dimension!''')
@overload
def __mul__( self : int , UpperCAmelCase_ : float ) -> Matrix:
'''simple docstring'''
...
@overload
def __mul__( self : Optional[Any] , UpperCAmelCase_ : Vector ) -> Vector:
'''simple docstring'''
...
    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    '''vector must have the same size as the '''
                    '''number of columns of the matrix!''')
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None
    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width
    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception('''change_component: indices out of bounds''')

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception('''change_component: indices out of bounds''')

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception('''Matrix is not square''')
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception('''Matrix is not square''')
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception('''Indices out of bounds''')

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception('''Matrix is not square''')
        if self.__height < 1:
            raise Exception('''Matrix has no element''')
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)


def square_zero_matrix(n: int) -> Matrix:
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
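# Usage sketch (illustrative addition) exercising the classes above.
if __name__ == "__main__":
    v = Vector([1.0, 2.0, 2.0])
    print(v.euclidean_length())          # 3.0
    print(v + Vector([1.0, 0.0, -2.0]))  # (2.0,2.0,0.0)

    m = Matrix([[2.0, 0.0], [0.0, 3.0]], 2, 2)
    print(m.determinant())               # 6.0
    print(m * Vector([1.0, 1.0]))        # (2.0,3.0)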
| 416 | 1 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
@register_to_config
def __init__( self : int , _A : int = 32 , _A : int = 64 , _A : int = 20 , _A : int = 768 , _A : Union[str, Any]=77 , _A : Optional[Any]=4 , _A : float = 0.0 , _A : str = "silu" , _A : Optional[str] = None , _A : Optional[str] = None , _A : Optional[str] = "linear" , _A : Optional[str] = "prd" , _A : Optional[int] = None , _A : Optional[int] = None , _A : Optional[int] = None , ):
super().__init__()
_UpperCamelCase = num_attention_heads
_UpperCamelCase = attention_head_dim
_UpperCamelCase = num_attention_heads * attention_head_dim
_UpperCamelCase = additional_embeddings
_UpperCamelCase = time_embed_dim or inner_dim
_UpperCamelCase = embedding_proj_dim or embedding_dim
_UpperCamelCase = clip_embed_dim or embedding_dim
_UpperCamelCase = Timesteps(_A , _A , 0 )
_UpperCamelCase = TimestepEmbedding(_A , _A , out_dim=_A , act_fn=_A )
_UpperCamelCase = nn.Linear(_A , _A )
if embedding_proj_norm_type is None:
_UpperCamelCase = None
elif embedding_proj_norm_type == "layer":
_UpperCamelCase = nn.LayerNorm(_A )
else:
raise ValueError(F"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
_UpperCamelCase = nn.Linear(_A , _A )
if encoder_hid_proj_type is None:
_UpperCamelCase = None
elif encoder_hid_proj_type == "linear":
_UpperCamelCase = nn.Linear(_A , _A )
else:
raise ValueError(F"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
_UpperCamelCase = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , _A ) )
if added_emb_type == "prd":
_UpperCamelCase = nn.Parameter(torch.zeros(1 , 1 , _A ) )
elif added_emb_type is None:
_UpperCamelCase = None
else:
raise ValueError(
F"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
_UpperCamelCase = nn.ModuleList(
[
BasicTransformerBlock(
_A , _A , _A , dropout=_A , activation_fn='''gelu''' , attention_bias=_A , )
for d in range(_A )
] )
if norm_in_type == "layer":
_UpperCamelCase = nn.LayerNorm(_A )
elif norm_in_type is None:
_UpperCamelCase = None
else:
raise ValueError(F"""Unsupported norm_in_type: {norm_in_type}.""" )
_UpperCamelCase = nn.LayerNorm(_A )
_UpperCamelCase = nn.Linear(_A , _A )
_UpperCamelCase = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_0000.0 )
causal_attention_mask.triu_(1 )
_UpperCamelCase = causal_attention_mask[None, ...]
self.register_buffer('''causal_attention_mask''' , _A , persistent=_A )
_UpperCamelCase = nn.Parameter(torch.zeros(1 , _A ) )
_UpperCamelCase = nn.Parameter(torch.zeros(1 , _A ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())
        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes.")

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
def UpperCamelCase_ ( self : Union[str, Any] , _A : Optional[Any] , _A : Union[torch.Tensor, float, int] , _A : torch.FloatTensor , _A : Optional[torch.FloatTensor] = None , _A : Optional[torch.BoolTensor] = None , _A : bool = True , ):
_UpperCamelCase = hidden_states.shape[0]
_UpperCamelCase = timestep
if not torch.is_tensor(_A ):
_UpperCamelCase = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(_A ) and len(timesteps.shape ) == 0:
_UpperCamelCase = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_UpperCamelCase = timesteps * torch.ones(_A , dtype=timesteps.dtype , device=timesteps.device )
_UpperCamelCase = self.time_proj(_A )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
_UpperCamelCase = timesteps_projected.to(dtype=self.dtype )
_UpperCamelCase = self.time_embedding(_A )
if self.embedding_proj_norm is not None:
_UpperCamelCase = self.embedding_proj_norm(_A )
_UpperCamelCase = self.embedding_proj(_A )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
_UpperCamelCase = self.encoder_hidden_states_proj(_A )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('''`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set''' )
_UpperCamelCase = self.proj_in(_A )
_UpperCamelCase = self.positional_embedding.to(hidden_states.dtype )
_UpperCamelCase = []
_UpperCamelCase = 0
if encoder_hidden_states is not None:
additional_embeds.append(_A )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
_UpperCamelCase = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
_UpperCamelCase = hidden_states[:, None, :]
_UpperCamelCase = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
_UpperCamelCase = self.prd_embedding.to(hidden_states.dtype ).expand(_A , -1 , -1 )
additional_embeds.append(_A )
_UpperCamelCase = torch.cat(
_A , dim=1 , )
# Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
_UpperCamelCase = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
_UpperCamelCase = F.pad(
_A , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
_UpperCamelCase = hidden_states + positional_embeddings
if attention_mask is not None:
_UpperCamelCase = (1 - attention_mask.to(hidden_states.dtype )) * -1_0000.0
_UpperCamelCase = F.pad(_A , (0, self.additional_embeddings) , value=0.0 )
_UpperCamelCase = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
_UpperCamelCase = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
_UpperCamelCase = self.norm_in(_A )
for block in self.transformer_blocks:
_UpperCamelCase = block(_A , attention_mask=_A )
_UpperCamelCase = self.norm_out(_A )
if self.prd_embedding is not None:
_UpperCamelCase = hidden_states[:, -1]
else:
_UpperCamelCase = hidden_states[:, additional_embeddings_len:]
_UpperCamelCase = self.proj_to_clip_embeddings(_A )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=_A )
    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
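# Illustrative sketch (added): the additive causal mask built in __init__ above.
# triu_(1) keeps the large negative bias strictly above the diagonal and zeroes
# the rest, so softmax gives future positions ~0 attention weight.
if __name__ == "__main__":
    causal = torch.full([4, 4], -10000.0)
    causal.triu_(1)
    print(causal)  # 0 on/below the diagonal, -10000.0 above it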
| 10 |
def solution(power: int = 1000) -> int:
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num


if __name__ == "__main__":
    power = int(input('Enter the power of 2: ').strip())
    print('2 ^ ', power, ' = ', 2**power)
    result = solution(power)
    print('Sum of the digits is: ', result)
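# Cross-check (illustrative addition): the same digit sum as a one-liner.
if __name__ == "__main__":
    assert solution(15) == sum(int(digit) for digit in str(2**15))  # 3+2+7+6+8 = 26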
| 592 | 0 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False

try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)
    @input.mark(KEYMAP['up'])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP['down'])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP['newline'])
    def select(self):
        move_cursor(len(self.choices) - self.position, 'DOWN')
        return self.position

    @input.mark(KEYMAP['interrupt'])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, 'DOWN')
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return
    def run(self, default_choice: int = 0):
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, '\n')
            if in_colab:
                forceWrite('Please input a choice index (starting from 0), and press enter', '\n')
            else:
                forceWrite('Please select a choice using the arrow or number keys, and selecting with enter', '\n')
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite('\n')
        move_cursor(len(self.choices) - self.position, 'UP')
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, 'UP')
                        clear_line()
                    self.write_choice(choice, '\n')
                    return choice
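# Usage sketch (illustrative addition; needs an interactive terminal). The menu
# blocks until a row is confirmed and returns the selected index.
if __name__ == "__main__":
    menu = BulletMenu("Choose a framework:", ["pytorch", "tensorflow", "jax"])
    selected = menu.run(default_choice=0)
    print(f"You picked option {selected}")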
| 184 |
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class __UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
_lowercase : str = DownBlockaD # noqa F405
_lowercase : Union[str, Any] = "down"
def _SCREAMING_SNAKE_CASE ( self: List[str] ):
'''simple docstring'''
__magic_name__ = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(__UpperCamelCase )
class __UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
_lowercase : List[str] = ResnetDownsampleBlockaD # noqa F405
_lowercase : Union[str, Any] = "down"
def _SCREAMING_SNAKE_CASE ( self: int ):
'''simple docstring'''
__magic_name__ = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(__UpperCamelCase )
class __UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
_lowercase : Dict = AttnDownBlockaD # noqa F405
_lowercase : List[Any] = "down"
def _SCREAMING_SNAKE_CASE ( self: Any ):
'''simple docstring'''
__magic_name__ = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(__UpperCamelCase )
class __UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
_lowercase : int = CrossAttnDownBlockaD # noqa F405
_lowercase : Any = "down"
def _SCREAMING_SNAKE_CASE ( self: int ):
'''simple docstring'''
__magic_name__, __magic_name__ = super().prepare_init_args_and_inputs_for_common()
__magic_name__ = 32
return init_dict, inputs_dict
def _SCREAMING_SNAKE_CASE ( self: str ):
'''simple docstring'''
__magic_name__ = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(__UpperCamelCase )
class __UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
_lowercase : Union[str, Any] = SimpleCrossAttnDownBlockaD # noqa F405
_lowercase : List[str] = "down"
@property
def _SCREAMING_SNAKE_CASE ( self: Any ):
'''simple docstring'''
return super().get_dummy_input(include_encoder_hidden_states=__UpperCamelCase )
def _SCREAMING_SNAKE_CASE ( self: List[Any] ):
'''simple docstring'''
__magic_name__, __magic_name__ = super().prepare_init_args_and_inputs_for_common()
__magic_name__ = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == 'mps' , 'MPS result is not consistent' )
def _SCREAMING_SNAKE_CASE ( self: List[Any] ):
'''simple docstring'''
__magic_name__ = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(__UpperCamelCase )
class __UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
_lowercase : str = SkipDownBlockaD # noqa F405
_lowercase : Union[str, Any] = "down"
@property
def _SCREAMING_SNAKE_CASE ( self: Optional[int] ):
'''simple docstring'''
return super().get_dummy_input(include_skip_sample=__UpperCamelCase )
def _SCREAMING_SNAKE_CASE ( self: List[str] ):
'''simple docstring'''
__magic_name__ = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(__UpperCamelCase )
class __UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
_lowercase : Tuple = AttnSkipDownBlockaD # noqa F405
_lowercase : str = "down"
@property
def _SCREAMING_SNAKE_CASE ( self: List[Any] ):
'''simple docstring'''
return super().get_dummy_input(include_skip_sample=__UpperCamelCase )
def _SCREAMING_SNAKE_CASE ( self: Dict ):
'''simple docstring'''
__magic_name__ = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(__UpperCamelCase )
class __UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
_lowercase : Optional[int] = DownEncoderBlockaD # noqa F405
_lowercase : List[str] = "down"
@property
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] ):
'''simple docstring'''
return super().get_dummy_input(include_temb=__UpperCamelCase )
def _SCREAMING_SNAKE_CASE ( self: Dict ):
'''simple docstring'''
__magic_name__ = {
'in_channels': 32,
'out_channels': 32,
}
__magic_name__ = self.dummy_input
return init_dict, inputs_dict
def _SCREAMING_SNAKE_CASE ( self: List[str] ):
'''simple docstring'''
__magic_name__ = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(__UpperCamelCase )
class __UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
_lowercase : List[Any] = AttnDownEncoderBlockaD # noqa F405
_lowercase : Optional[Any] = "down"
@property
def _SCREAMING_SNAKE_CASE ( self: List[Any] ):
'''simple docstring'''
return super().get_dummy_input(include_temb=__UpperCamelCase )
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] ):
'''simple docstring'''
__magic_name__ = {
'in_channels': 32,
'out_channels': 32,
}
__magic_name__ = self.dummy_input
return init_dict, inputs_dict
def _SCREAMING_SNAKE_CASE ( self: Dict ):
'''simple docstring'''
__magic_name__ = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(__UpperCamelCase )
class __UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
_lowercase : List[Any] = UNetMidBlockaD # noqa F405
_lowercase : Any = "mid"
def _SCREAMING_SNAKE_CASE ( self: List[str] ):
'''simple docstring'''
__magic_name__ = {
'in_channels': 32,
'temb_channels': 1_28,
}
__magic_name__ = self.dummy_input
return init_dict, inputs_dict
def _SCREAMING_SNAKE_CASE ( self: str ):
'''simple docstring'''
__magic_name__ = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(__UpperCamelCase )
class __UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
_lowercase : str = UNetMidBlockaDCrossAttn # noqa F405
_lowercase : int = "mid"
def _SCREAMING_SNAKE_CASE ( self: List[Any] ):
'''simple docstring'''
__magic_name__, __magic_name__ = super().prepare_init_args_and_inputs_for_common()
__magic_name__ = 32
return init_dict, inputs_dict
def _SCREAMING_SNAKE_CASE ( self: Tuple ):
'''simple docstring'''
__magic_name__ = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(__UpperCamelCase )
class __UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
_lowercase : Tuple = UNetMidBlockaDSimpleCrossAttn # noqa F405
_lowercase : str = "mid"
@property
def _SCREAMING_SNAKE_CASE ( self: Any ):
'''simple docstring'''
return super().get_dummy_input(include_encoder_hidden_states=__UpperCamelCase )
def _SCREAMING_SNAKE_CASE ( self: Tuple ):
'''simple docstring'''
__magic_name__, __magic_name__ = super().prepare_init_args_and_inputs_for_common()
__magic_name__ = 32
return init_dict, inputs_dict
def _SCREAMING_SNAKE_CASE ( self: Any ):
'''simple docstring'''
__magic_name__ = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(__UpperCamelCase )
class __UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
_lowercase : List[Any] = UpBlockaD # noqa F405
_lowercase : List[Any] = "up"
@property
def _SCREAMING_SNAKE_CASE ( self: Tuple ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCamelCase )
def _SCREAMING_SNAKE_CASE ( self: Any ):
'''simple docstring'''
__magic_name__ = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(__UpperCamelCase )
class __UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
_lowercase : List[Any] = ResnetUpsampleBlockaD # noqa F405
_lowercase : Dict = "up"
@property
def _SCREAMING_SNAKE_CASE ( self: Optional[int] ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCamelCase )
def _SCREAMING_SNAKE_CASE ( self: Dict ):
'''simple docstring'''
__magic_name__ = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(__UpperCamelCase )
class __UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
_lowercase : Any = CrossAttnUpBlockaD # noqa F405
_lowercase : Union[str, Any] = "up"
@property
def _SCREAMING_SNAKE_CASE ( self: Optional[int] ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCamelCase )
def _SCREAMING_SNAKE_CASE ( self: str ):
'''simple docstring'''
__magic_name__, __magic_name__ = super().prepare_init_args_and_inputs_for_common()
__magic_name__ = 32
return init_dict, inputs_dict
def _SCREAMING_SNAKE_CASE ( self: Any ):
'''simple docstring'''
__magic_name__ = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(__UpperCamelCase )
class __UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
_lowercase : str = SimpleCrossAttnUpBlockaD # noqa F405
_lowercase : Tuple = "up"
@property
def _SCREAMING_SNAKE_CASE ( self: List[str] ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCamelCase , include_encoder_hidden_states=__UpperCamelCase )
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] ):
'''simple docstring'''
__magic_name__, __magic_name__ = super().prepare_init_args_and_inputs_for_common()
__magic_name__ = 32
return init_dict, inputs_dict
def _SCREAMING_SNAKE_CASE ( self: List[Any] ):
'''simple docstring'''
__magic_name__ = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(__UpperCamelCase )
class __UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
_lowercase : Optional[Any] = AttnUpBlockaD # noqa F405
_lowercase : Optional[int] = "up"
@property
def _SCREAMING_SNAKE_CASE ( self: str ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCamelCase )
@unittest.skipIf(torch_device == 'mps' , 'MPS result is not consistent' )
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] ):
'''simple docstring'''
__magic_name__ = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(__UpperCamelCase )
class __UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
_lowercase : Union[str, Any] = SkipUpBlockaD # noqa F405
_lowercase : int = "up"
@property
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCamelCase )
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] ):
'''simple docstring'''
__magic_name__ = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(__UpperCamelCase )
class __UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
_lowercase : Union[str, Any] = AttnSkipUpBlockaD # noqa F405
_lowercase : Optional[Any] = "up"
@property
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCamelCase )
def _SCREAMING_SNAKE_CASE ( self: Tuple ):
'''simple docstring'''
__magic_name__ = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(__UpperCamelCase )
class __UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
_lowercase : List[str] = UpDecoderBlockaD # noqa F405
_lowercase : List[str] = "up"
@property
def _SCREAMING_SNAKE_CASE ( self: List[Any] ):
'''simple docstring'''
return super().get_dummy_input(include_temb=__UpperCamelCase )
def _SCREAMING_SNAKE_CASE ( self: List[Any] ):
'''simple docstring'''
__magic_name__ = {'in_channels': 32, 'out_channels': 32}
__magic_name__ = self.dummy_input
return init_dict, inputs_dict
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] ):
'''simple docstring'''
__magic_name__ = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(__UpperCamelCase )
class __UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
_lowercase : Optional[Any] = AttnUpDecoderBlockaD # noqa F405
_lowercase : Any = "up"
@property
def _SCREAMING_SNAKE_CASE ( self: Dict ):
'''simple docstring'''
return super().get_dummy_input(include_temb=__UpperCamelCase )
def _SCREAMING_SNAKE_CASE ( self: str ):
'''simple docstring'''
__magic_name__ = {'in_channels': 32, 'out_channels': 32}
__magic_name__ = self.dummy_input
return init_dict, inputs_dict
def _SCREAMING_SNAKE_CASE ( self: List[Any] ):
'''simple docstring'''
__magic_name__ = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(__UpperCamelCase )
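# Illustrative note (added): each test above pins nine output values. A minimal
# sketch of the comparison the shared UNetBlockTesterMixin performs (the exact
# slice and tolerance here are assumptions, not verbatim from the mixin):
if __name__ == "__main__":
    import torch

    output = torch.randn(1, 32, 16, 16)               # block output (N, C, H, W)
    output_slice = output[0, -1, -3:, -3:].flatten()  # nine pinned values
    expected_slice = output_slice.clone()
    print(torch.allclose(output_slice, expected_slice, atol=5e-3))  # True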
| 184 | 1 |
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$")
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
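# Quick checks (illustrative addition) against the pattern above: a local 07x
# number, an international +94 form, and a non-mobile prefix.
if __name__ == "__main__":
    print(is_sri_lankan_phone_number("0711234567"))    # True
    print(is_sri_lankan_phone_number("+94773283048"))  # True
    print(is_sri_lankan_phone_number("0912345678"))    # False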
| 285 |
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=[30, 30],
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        n_targets=8,
        num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])
        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device)
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return YolosConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, num_detection_tokens=self.num_detection_tokens, num_labels=self.num_labels, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size))
    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values=pixel_values)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))
        result = model(pixel_values=pixel_values, labels=labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long)
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float)
                    labels.append(target)
                inputs_dict["labels"] = labels
        return inputs_dict
    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_inputs_embeds(self):
        # YOLOS does not use inputs_embeds
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len])
            out_len = len(outputs)
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len])
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1)
            self.assertEqual(len(hidden_states), expected_num_layers)
            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size])

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained('hustvl/yolos-small') if is_vision_available() else None
    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained('hustvl/yolos-small').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)
        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))
        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]])[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)
        self.assertEqual(len(results['scores']), 5)
        self.assertTrue(torch.allclose(results['scores'], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results['labels'].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results['boxes'][0, :], expected_slice_boxes))
| 285 | 1 |
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
    url = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
    res = requests.get(url, headers={'UserAgent': UserAgent().random})
    # res.raise_for_status()
    with open('project1a.html', 'wb') as out_file:  # only for knowing the class
        for data in res.iter_content(10_000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, 'html.parser')
    links = list(soup.select('.eZt8xd'))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(F'''https://google.com{link.get("href")}''')
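# Usage sketch (added; the script file name is a placeholder):
#   python crawl_google_results.py hello world
# fetches the Google results page for "hello world" and opens the top five
# result links in the default browser.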
| 718 |
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
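# Note (added for clarity): get_distance implements the classic escape-time
# algorithm. The returned value is the normalized escape step in [0, 1]; a value
# of 1 means the point never diverged within max_step iterations and is drawn
# as part of the set (black in both coloring functions above).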
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 83 | 0 |
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`List[float]`): Predicted labels, as returned by a model.\n    references (`List[float]`): Ground truth labels.\n    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n        only the spearmanr score. Defaults to `False`.\nReturns:\n    spearmanr (`float`): Spearman correlation coefficient.\n    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n    Example 1:\n        >>> spearmanr_metric = datasets.load_metric("spearmanr")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n        >>> print(results)\n        {\'spearmanr\': -0.7}\n\n    Example 2:\n        >>> spearmanr_metric = datasets.load_metric("spearmanr")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n        ...                                    predictions=[10, 9, 2.5, 6, 4],\n        ...                                    return_pvalue=True)\n        >>> print(results[\'spearmanr\'])\n        -0.7\n        >>> print(round(results[\'spearmanr_pvalue\'], 2))\n        0.19\n'
_CITATION = r'\n@book{kokoska2000crc,\n    title={CRC standard probability and statistics tables and formulae},\n    author={Kokoska, Stephen and Zwillinger, Daniel},\n    year={2000},\n    publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n    author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n            Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n            Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n            Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n            Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n            Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n            Kern, Robert and Larson, Eric and Carey, C J and\n            Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n            {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n            Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n            Harris, Charles R. and Archibald, Anne M. and\n            Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n            {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n    title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n            Computing in Python}},\n    journal = {Nature Methods},\n    year = {2020},\n    volume = {17},\n    pages = {261--272},\n    adsurl = {https://rdcu.be/b08Wh},\n    doi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('float'),
                    'references': datasets.Value('float'),
                }
            ),
            reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
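# Minimal usage sketch (mirrors the docstring above; assumes the `datasets`
# library is installed):
#   spearmanr_metric = datasets.load_metric("spearmanr")
#   spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])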
| 94 |
'''simple docstring'''
def hamming(n_element: int) -> list:
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("a should be a positive number")
        raise my_error

    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5))
        index += 1
    return hamming_list
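# The indices i, j, k above each point at the smallest already-generated Hamming
# number whose multiple by 2, 3 or 5 (respectively) has not been appended yet;
# appending the minimum of the three candidates keeps the list sorted, and the
# while loops skip past equal candidates so no duplicates are produced.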
if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
    hamming_numbers = hamming(int(n))
print("""-----------------------------------------------------""")
print(F'The list with nth numbers is: {hamming_numbers}')
print("""-----------------------------------------------------""")
| 244 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    'configuration_ctrl': ['CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CTRLConfig'],
    'tokenization_ctrl': ['CTRLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_ctrl'] = [
'CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'CTRLForSequenceClassification',
'CTRLLMHeadModel',
'CTRLModel',
'CTRLPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_ctrl'] = [
'TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCTRLForSequenceClassification',
'TFCTRLLMHeadModel',
'TFCTRLModel',
'TFCTRLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
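    # The _LazyModule above defers the heavy torch/TF imports until one of the
    # listed attributes is actually accessed, keeping module import cheap.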
| 283 |
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
('audio-spectrogram-transformer', 'ASTFeatureExtractor'),
('beit', 'BeitFeatureExtractor'),
('chinese_clip', 'ChineseCLIPFeatureExtractor'),
('clap', 'ClapFeatureExtractor'),
('clip', 'CLIPFeatureExtractor'),
('clipseg', 'ViTFeatureExtractor'),
('conditional_detr', 'ConditionalDetrFeatureExtractor'),
('convnext', 'ConvNextFeatureExtractor'),
('cvt', 'ConvNextFeatureExtractor'),
('data2vec-audio', 'Wav2Vec2FeatureExtractor'),
('data2vec-vision', 'BeitFeatureExtractor'),
('deformable_detr', 'DeformableDetrFeatureExtractor'),
('deit', 'DeiTFeatureExtractor'),
('detr', 'DetrFeatureExtractor'),
('dinat', 'ViTFeatureExtractor'),
('donut-swin', 'DonutFeatureExtractor'),
('dpt', 'DPTFeatureExtractor'),
('encodec', 'EncodecFeatureExtractor'),
('flava', 'FlavaFeatureExtractor'),
('glpn', 'GLPNFeatureExtractor'),
('groupvit', 'CLIPFeatureExtractor'),
('hubert', 'Wav2Vec2FeatureExtractor'),
('imagegpt', 'ImageGPTFeatureExtractor'),
('layoutlmv2', 'LayoutLMv2FeatureExtractor'),
('layoutlmv3', 'LayoutLMv3FeatureExtractor'),
('levit', 'LevitFeatureExtractor'),
('maskformer', 'MaskFormerFeatureExtractor'),
('mctct', 'MCTCTFeatureExtractor'),
('mobilenet_v1', 'MobileNetV1FeatureExtractor'),
('mobilenet_v2', 'MobileNetV2FeatureExtractor'),
('mobilevit', 'MobileViTFeatureExtractor'),
('nat', 'ViTFeatureExtractor'),
('owlvit', 'OwlViTFeatureExtractor'),
('perceiver', 'PerceiverFeatureExtractor'),
('poolformer', 'PoolFormerFeatureExtractor'),
('regnet', 'ConvNextFeatureExtractor'),
('resnet', 'ConvNextFeatureExtractor'),
('segformer', 'SegformerFeatureExtractor'),
('sew', 'Wav2Vec2FeatureExtractor'),
('sew-d', 'Wav2Vec2FeatureExtractor'),
('speech_to_text', 'Speech2TextFeatureExtractor'),
('speecht5', 'SpeechT5FeatureExtractor'),
('swiftformer', 'ViTFeatureExtractor'),
('swin', 'ViTFeatureExtractor'),
('swinv2', 'ViTFeatureExtractor'),
('table-transformer', 'DetrFeatureExtractor'),
('timesformer', 'VideoMAEFeatureExtractor'),
('tvlt', 'TvltFeatureExtractor'),
('unispeech', 'Wav2Vec2FeatureExtractor'),
('unispeech-sat', 'Wav2Vec2FeatureExtractor'),
('van', 'ConvNextFeatureExtractor'),
('videomae', 'VideoMAEFeatureExtractor'),
('vilt', 'ViltFeatureExtractor'),
('vit', 'ViTFeatureExtractor'),
('vit_mae', 'ViTFeatureExtractor'),
('vit_msn', 'ViTFeatureExtractor'),
('wav2vec2', 'Wav2Vec2FeatureExtractor'),
('wav2vec2-conformer', 'Wav2Vec2FeatureExtractor'),
('wavlm', 'Wav2Vec2FeatureExtractor'),
('whisper', 'WhisperFeatureExtractor'),
('xclip', 'CLIPFeatureExtractor'),
('yolos', 'YolosFeatureExtractor'),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f'.{module_name}', 'transformers.models')
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, '__name__', None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module('transformers')
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)
    return None
def get_feature_extractor_config(pretrained_model_name_or_path, cache_dir=None, force_download=False,
                                 resume_download=False, proxies=None, use_auth_token=None, revision=None,
                                 local_files_only=False, **kwargs):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, cache_dir=cache_dir, force_download=force_download,
        resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision,
        local_files_only=local_files_only)
    if resolved_config_file is None:
        logger.info(
            'Could not locate the feature extractor configuration file, will try to use the model config instead.')
        return {}
    with open(resolved_config_file, encoding='utf-8') as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    def __init__(self):
        raise EnvironmentError(
            'AutoFeatureExtractor is designed to be instantiated '
            'using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.')

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop('config', None)
        trust_remote_code = kwargs.pop('trust_remote_code', None)
        kwargs['_from_auto'] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get('feature_extractor_type', None)
        feature_extractor_auto_map = None
        if 'AutoFeatureExtractor' in config_dict.get('auto_map', {}):
            feature_extractor_auto_map = config_dict['auto_map']['AutoFeatureExtractor']

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config, 'feature_extractor_type', None)
            if hasattr(config, 'auto_map') and 'AutoFeatureExtractor' in config.auto_map:
                feature_extractor_auto_map = config.auto_map['AutoFeatureExtractor']

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code)

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs)
            _ = kwargs.pop('code_revision', None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f'Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a '
            f'`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following '
            f'`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}')

    @staticmethod
    def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
| 283 | 1 |
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))


# SiLU / swish activation: x * sigmoid(x). The original function name was lost
# in this copy, so `swish` is used here.
def swish(vector: np.ndarray) -> np.ndarray:
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
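    # Illustrative sanity check (added; not part of the original file):
    # sigmoid is bounded in (0, 1) and swish(x) = x * sigmoid(x) is 0 at x = 0.
    print(sigmoid(np.array([-1.0, 0.0, 1.0])))  # ~[0.269, 0.5, 0.731]
    print(swish(np.array([-1.0, 0.0, 1.0])))    # ~[-0.269, 0.0, 0.731]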
| 576 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_mctct""": ["""MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MCTCTConfig"""],
"""feature_extraction_mctct""": ["""MCTCTFeatureExtractor"""],
"""processing_mctct""": ["""MCTCTProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
"""MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MCTCTForCTC""",
"""MCTCTModel""",
"""MCTCTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 336 | 0 |
import os
def solution() -> str:
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
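# The helper above expects a sibling file `num.txt` with one large integer per
# line; the answer is the first ten digits of their sum (Project Euler style).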
if __name__ == "__main__":
print(solution())
| 259 |
import numpy as np
class Cell:
    def __init__(self) -> None:
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell) -> bool:
        return self.position == cell.position

    def showcell(self) -> None:
        print(self.position)
class Gridworld:
    def __init__(self, world_size=(5, 5)) -> None:
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self) -> None:
        print(self.w)

    def get_neigbours(self, cell):
        # offsets of the 8 cells surrounding the current cell
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours
def astar(world, start, goal):
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neigbours(current):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]
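# Note on the cost terms above: `g` counts steps from the start, `h` is the
# squared Euclidean distance to the goal, and the node with the smallest
# f = g + h is expanded first, which is the standard A* priority rule.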
if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
print(f'path from {start.position} to {goal.position}')
    s = astar(world, start, goal)
# Just for visual reasons.
for i in s:
        world.w[i] = 1
print(world.w)
| 259 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = "\n    Examples:\n    ```py\n    >>> import torch\n    >>> import numpy as np\n\n    >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n    >>> from transformers import pipeline\n    >>> from diffusers.utils import load_image\n\n\n    >>> def make_hint(image, depth_estimator):\n    ...     image = depth_estimator(image)[\"depth\"]\n    ...     image = np.array(image)\n    ...     image = image[:, :, None]\n    ...     image = np.concatenate([image, image, image], axis=2)\n    ...     detected_map = torch.from_numpy(image).float() / 255.0\n    ...     hint = detected_map.permute(2, 0, 1)\n    ...     return hint\n\n\n    >>> depth_estimator = pipeline(\"depth-estimation\")\n\n    >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n    ...     \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n    ... )\n    >>> pipe_prior = pipe_prior.to(\"cuda\")\n\n    >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n    ...     \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16\n    ... )\n    >>> pipe = pipe.to(\"cuda\")\n\n\n    >>> img = load_image(\n    ...     \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n    ...     \"/kandinsky/cat.png\"\n    ... ).resize((768, 768))\n\n    >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")\n\n    >>> prompt = \"A robot, 4k photo\"\n    >>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"\n\n    >>> generator = torch.Generator(device=\"cuda\").manual_seed(43)\n\n    >>> image_emb, zero_image_emb = pipe_prior(\n    ...     prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n    ... ).to_tuple()\n\n    >>> images = pipe(\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=zero_image_emb,\n    ...     hint=hint,\n    ...     num_inference_steps=50,\n    ...     generator=generator,\n    ...     height=768,\n    ...     width=768,\n    ... ).images\n\n    >>> images[0].save(\"robot_cat.png\")\n    ```\n"
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
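# Example: with the default scale_factor=8, downscale_height_and_width(768, 768)
# returns (96, 96): each pixel dimension is divided by scale_factor**2 (rounded
# up) and re-multiplied by scale_factor to get the movq latent resolution.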
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image_embeds, negative_image_embeds, hint, height=512, width=512,
                 num_inference_steps=100, guidance_scale=4.0, num_images_per_prompt=1,
                 generator=None, latents=None, output_type="pil", return_dict=True):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator,
            latents, self.scheduler)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs, return_dict=False)[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 597 |
'''simple docstring'''
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / (sigma), ndigits) for x in data]
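# Illustrative values (computed by hand, not from the original file):
#   normalization([2, 7, 10, 20, 30, 50]) -> [0.0, 0.104, 0.167, 0.375, 0.583, 1.0]
#   standardization rescales the same data to zero mean and unit sample standard deviation.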
| 597 | 1 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 705 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument(
"--original_config_file",
type=str,
required=True,
help="The YAML config file corresponding to the original architecture.",
)
parser.add_argument(
"--num_in_channels",
default=None,
type=int,
help="The number of input channels. If `None` number of input channels will be automatically inferred.",
)
parser.add_argument(
"--image_size",
default=512,
type=int,
help=(
"The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
" Base. Use 768 for Stable Diffusion v2."
),
)
parser.add_argument(
"--extract_ema",
action="store_true",
help=(
"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
),
)
parser.add_argument(
"--upcast_attention",
action="store_true",
help=(
"Whether the attention computation should always be upcasted. This is necessary when running stable"
" diffusion 2.1."
),
)
parser.add_argument(
"--from_safetensors",
action="store_true",
help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
)
parser.add_argument(
"--to_safetensors",
action="store_true",
help="Whether to store pipeline in safetensors format or not.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
    def parse_bool(string: str) -> bool:
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"could not parse string as bool {string}")
parser.add_argument(
"--use_linear_projection", help="Override for use linear projection", required=False, type=parse_bool
)
parser.add_argument("--cross_attention_dim", help="Override for cross attention_dim", required=False, type=int)
    args = parser.parse_args()

    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
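# Example invocation (script name and paths are placeholders):
#   python convert_original_controlnet_to_diffusers.py \
#       --checkpoint_path ./control_sd15_canny.pth \
#       --original_config_file ./cldm_v15.yaml \
#       --dump_path ./controlnet-canny-out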
| 526 | 0 |
"""simple docstring"""
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}
def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 555 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class a_ ( _UpperCAmelCase ):
a : Optional[int] = VOCAB_FILES_NAMES
a : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
a : Optional[int] = ['input_ids', 'attention_mask']
a : str = MBartTokenizer
a : List[int] = []
a : List[int] = []
def __init__( self : str , __UpperCamelCase : List[str]=None , __UpperCamelCase : List[Any]=None , __UpperCamelCase : Dict="<s>" , __UpperCamelCase : int="</s>" , __UpperCamelCase : str="</s>" , __UpperCamelCase : Union[str, Any]="<s>" , __UpperCamelCase : Any="<unk>" , __UpperCamelCase : Any="<pad>" , __UpperCamelCase : int="<mask>" , __UpperCamelCase : Any=None , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : Optional[Any]=None , **__UpperCamelCase : Optional[int] , ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else mask_token
super().__init__(
vocab_file=__UpperCamelCase , tokenizer_file=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , mask_token=__UpperCamelCase , src_lang=__UpperCamelCase , tgt_lang=__UpperCamelCase , additional_special_tokens=__UpperCamelCase , **__UpperCamelCase , )
_UpperCAmelCase = vocab_file
_UpperCAmelCase = False if not self.vocab_file else True
_UpperCAmelCase = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
_UpperCAmelCase = {
lang_code: self.convert_tokens_to_ids(__UpperCamelCase ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_UpperCAmelCase = src_lang if src_lang is not None else """en_XX"""
_UpperCAmelCase = self.convert_tokens_to_ids(self._src_lang )
_UpperCAmelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _snake_case ( self : Optional[Any] ) ->str:
'''simple docstring'''
return self._src_lang
@src_lang.setter
def _snake_case ( self : int , __UpperCamelCase : str ) ->None:
'''simple docstring'''
_UpperCAmelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _snake_case ( self : str , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ) ->List[int]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ) ->List[int]:
'''simple docstring'''
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self : Dict , __UpperCamelCase : int , __UpperCamelCase : str , __UpperCamelCase : Optional[str] , __UpperCamelCase : Optional[str] , **__UpperCamelCase : Any ) ->Dict:
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
_UpperCAmelCase = src_lang
_UpperCAmelCase = self(__UpperCamelCase , add_special_tokens=__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase )
_UpperCAmelCase = self.convert_tokens_to_ids(__UpperCamelCase )
_UpperCAmelCase = tgt_lang_id
return inputs
def _snake_case ( self : Tuple , __UpperCamelCase : List[str] , __UpperCamelCase : str = "en_XX" , __UpperCamelCase : Optional[List[str]] = None , __UpperCamelCase : str = "ro_RO" , **__UpperCamelCase : Union[str, Any] , ) ->BatchEncoding:
'''simple docstring'''
_UpperCAmelCase = src_lang
_UpperCAmelCase = tgt_lang
return super().prepare_seqaseq_batch(__UpperCamelCase , __UpperCamelCase , **__UpperCamelCase )
def _snake_case ( self : Union[str, Any] ) ->int:
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def _snake_case ( self : Union[str, Any] ) ->Tuple:
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : Dict ) ->None:
'''simple docstring'''
_UpperCAmelCase = self.convert_tokens_to_ids(__UpperCamelCase )
_UpperCAmelCase = []
_UpperCAmelCase = [self.eos_token_id, self.cur_lang_code]
_UpperCAmelCase = self.convert_ids_to_tokens(self.prefix_tokens )
_UpperCAmelCase = self.convert_ids_to_tokens(self.suffix_tokens )
_UpperCAmelCase = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _snake_case ( self : List[str] , __UpperCamelCase : str ) ->None:
'''simple docstring'''
_UpperCAmelCase = self.convert_tokens_to_ids(__UpperCamelCase )
_UpperCAmelCase = []
_UpperCAmelCase = [self.eos_token_id, self.cur_lang_code]
_UpperCAmelCase = self.convert_ids_to_tokens(self.prefix_tokens )
_UpperCAmelCase = self.convert_ids_to_tokens(self.suffix_tokens )
_UpperCAmelCase = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
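
# A minimal sketch of the TemplateProcessing post-processor configured above, using
# a toy vocabulary; the words and ids here are illustrative, not the real model's.
if __name__ == "__main__":
    from tokenizers import Tokenizer
    from tokenizers.models import WordLevel
    from tokenizers.pre_tokenizers import Whitespace

    _tok = Tokenizer(WordLevel({"hello": 0, "world": 1, "</s>": 2, "en_XX": 3}, unk_token="</s>"))
    _tok.pre_tokenizer = Whitespace()
    # Mirrors the suffix-only template built by set_src_lang_special_tokens: "$A </s> en_XX".
    _tok.post_processor = processors.TemplateProcessing(
        single="$A </s> en_XX",
        pair="$A $B </s> en_XX",
        special_tokens=[("</s>", 2), ("en_XX", 3)],
    )
    assert _tok.encode("hello world").ids == [0, 1, 2, 3]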
| 555 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class ConstraintTest(unittest.TestCase):
    """simple docstring"""

    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
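
# A compact illustration of the update() protocol exercised by the tests above:
# token ids are fed one at a time and (stepped, completed, reset) reports progress.
if __name__ == "__main__" and is_torch_available():
    _dc = DisjunctiveConstraint([[5, 6], [5, 7, 8]])
    for _token in (5, 7, 8):
        _stepped, _completed, _reset = _dc.update(_token)
    assert _completed and _dc.current_seq == [5, 7, 8]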
| 102 |
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
lowerCamelCase = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """simple docstring"""

    features: Optional[datasets.Features] = None
def _generate_iterable_examples(
    df: "pyspark.sql.DataFrame",
    partition_order: List[int],
):
    """simple docstring"""
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn
class snake_case_ ( _BaseExamplesIterable ):
"""simple docstring"""
    def __init__(self, df: "pyspark.sql.DataFrame", partition_order=None):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)
def __iter__( self ):
yield from self.generate_examples_fn()
    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    """simple docstring"""

    BUILDER_CONFIG_CLASS = SparkConfig
    def __init__(self, df: "pyspark.sql.DataFrame", cache_dir: str = None, working_dir: str = None, **config_kwargs):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )
    def _validate_cache_dir(self):
        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return

        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(
        self,
        fpath: str,
        file_format: str,
        max_shard_size: int,
    ):
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )

            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split(
        self,
        split_generator: "datasets.SplitGenerator",
        file_format: str = "arrow",
        max_shard_size=None,
        num_proc=None,
        **kwargs,
    ):
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)

        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (num_examples, num_bytes, num_shards, shard_lengths) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                task_id: int,
                shard_id: int,
                global_shard_id: int,
            ):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )
    def _get_examples_iterable_for_split(
        self,
        split_generator: "datasets.SplitGenerator",
    ) -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df)
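
# A minimal end-to-end sketch of this builder via its public entry point; the local
# SparkSession and toy dataframe are illustrative, and pyspark is assumed installed.
if __name__ == "__main__":
    from pyspark.sql import SparkSession

    from datasets import Dataset

    spark = SparkSession.builder.master("local[*]").appName("demo").getOrCreate()
    df = spark.createDataFrame([{"id": i, "text": f"row {i}"} for i in range(10)])
    ds = Dataset.from_spark(df)  # materializes the dataframe through the Spark builder
    print(ds[0])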
| 102 | 1 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    '''simple docstring'''
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('Bangalore'), 1):
print(F'''Job {i:>2} is {job[0]} at {job[1]}''')
| 106 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(self, repo_info: Optional[DatasetInfo] = None, token: Optional[str] = None, **kwargs):
        """simple docstring"""
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        """simple docstring"""
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        """simple docstring"""
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        """simple docstring"""
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        """simple docstring"""
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
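
# A small usage sketch of the legacy filesystem above; the repo name is only an
# example and a network connection to the Hub is assumed.
if __name__ == "__main__":
    from huggingface_hub import HfApi

    info = HfApi().dataset_info("squad")
    fs = HfFileSystem(repo_info=info)
    print(fs.ls(""))  # top-level files and directories of the dataset repo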
| 475 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
UpperCAmelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
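
# Rough illustration of the laziness (runs only when executed directly, so the
# sys.modules swap above is unaffected): attribute access on the lazy module is
# what triggers the sentencepiece-backed import. Module path is illustrative.
if __name__ == "__main__":
    import transformers.models.bartpho as bartpho

    print(type(bartpho).__name__)  # '_LazyModule' until an attribute is resolved
    print(bartpho.BartphoTokenizer)  # first touch imports tokenization_bartpho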
| 711 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
    """simple docstring"""

    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
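
# A sketch of pointing debug_launcher at user code: it spawns CPU worker processes
# so distributed code paths can be exercised without GPUs. The demo function below
# is illustrative, not part of the test suite.
def _demo():
    from accelerate import Accelerator

    accelerator = Accelerator()
    print(f"process {accelerator.process_index} of {accelerator.num_processes}")


if __name__ == "__main__":
    debug_launcher(_demo, num_processes=2)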
| 77 | 0 |
'''simple docstring'''
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """simple docstring"""

    order = 1

    @register_to_config
    def __init__(
        self, num_train_timesteps: int = 1_000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None
    ) -> None:
        """simple docstring"""
        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None) -> None:
        """simple docstring"""
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        """simple docstring"""
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        """simple docstring"""
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        """simple docstring"""
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        """simple docstring"""
        return self.config.num_train_timesteps
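
# A minimal denoising-loop sketch showing how the scheduler is driven; `unet` is a
# stand-in for any model mapping (sample, t) -> model_output.
if __name__ == "__main__":
    scheduler = IPNDMScheduler(num_train_timesteps=1_000)
    scheduler.set_timesteps(50)
    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma

    def unet(x, t):  # placeholder model
        return torch.zeros_like(x)

    for t in scheduler.timesteps:
        residual = unet(scheduler.scale_model_input(sample, t), t)
        sample = scheduler.step(residual, t, sample).prev_sample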
| 627 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def read_txt_into_dict(filename):
    """simple docstring"""
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
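
# The label file this helper expects looks like the following (one label per line;
# extra columns are ignored since only the first whitespace-separated token is kept):
#
#     down
#     go
#     left
#
# which yields {0: "down", 1: "go", 2: "left"}; the labels here are illustrative.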
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """simple docstring"""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def rename_dict(key, value, full_name, weight_type, hf_dict):
    """simple docstring"""
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    """simple docstring"""
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    """simple docstring"""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """simple docstring"""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    """simple docstring"""
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16_000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
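
# Example invocations (script file name and paths are illustrative):
#
#   # fine-tuned CTC checkpoint, with its fairseq dictionary
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path ./wav2vec_small_960h.pt \
#       --dict_path ./dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-base-960h
#
#   # pretraining-only checkpoint
#   python convert_wav2vec2_checkpoint.py --not_finetuned \
#       --checkpoint_path ./wav2vec_small.pt \
#       --pytorch_dump_folder_path ./wav2vec2-base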
| 627 | 1 |
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
UpperCamelCase__ = XLMRobertaTokenizer
UpperCamelCase__ = XLMRobertaTokenizerFast
UpperCamelCase__ = True
UpperCamelCase__ = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def _A( self ):
lowercase ='''<pad>'''
lowercase =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def _A( self ):
lowercase =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(snake_case_ ) , 10_02 )
def _A( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_02 )
def _A( self ):
lowercase =XLMRobertaTokenizer(snake_case_ , keep_accents=snake_case_ )
lowercase =tokenizer.tokenize('''This is a test''' )
self.assertListEqual(snake_case_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
lowercase =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
snake_case_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
lowercase =tokenizer.convert_tokens_to_ids(snake_case_ )
self.assertListEqual(
snake_case_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
lowercase =tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertListEqual(
snake_case_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def _A( self ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowercase =(self.rust_tokenizer_class, '''hf-internal-testing/tiny-xlm-roberta''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowercase =self.rust_tokenizer_class.from_pretrained(snake_case_ , **snake_case_ )
lowercase =self.tokenizer_class.from_pretrained(snake_case_ , **snake_case_ )
lowercase =tempfile.mkdtemp()
lowercase =tokenizer_r.save_pretrained(snake_case_ )
lowercase =tokenizer_p.save_pretrained(snake_case_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
lowercase =tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(snake_case_ , snake_case_ )
# Checks everything loads correctly in the same way
lowercase =tokenizer_r.from_pretrained(snake_case_ )
lowercase =tokenizer_p.from_pretrained(snake_case_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case_ , snake_case_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(snake_case_ )
# Save tokenizer rust, legacy_format=True
lowercase =tempfile.mkdtemp()
lowercase =tokenizer_r.save_pretrained(snake_case_ , legacy_format=snake_case_ )
lowercase =tokenizer_p.save_pretrained(snake_case_ )
# Checks it save with the same files
self.assertSequenceEqual(snake_case_ , snake_case_ )
# Checks everything loads correctly in the same way
lowercase =tokenizer_r.from_pretrained(snake_case_ )
lowercase =tokenizer_p.from_pretrained(snake_case_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case_ , snake_case_ ) )
shutil.rmtree(snake_case_ )
# Save tokenizer rust, legacy_format=False
lowercase =tempfile.mkdtemp()
lowercase =tokenizer_r.save_pretrained(snake_case_ , legacy_format=snake_case_ )
lowercase =tokenizer_p.save_pretrained(snake_case_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowercase =tokenizer_r.from_pretrained(snake_case_ )
lowercase =tokenizer_p.from_pretrained(snake_case_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case_ , snake_case_ ) )
shutil.rmtree(snake_case_ )
@cached_property
def _A( self ):
return XLMRobertaTokenizer.from_pretrained('''xlm-roberta-base''' )
def _A( self ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(snake_case_ , f.name )
lowercase =XLMRobertaTokenizer(f.name , keep_accents=snake_case_ )
lowercase =pickle.dumps(snake_case_ )
pickle.loads(snake_case_ )
def _A( self ):
if not self.test_rust_tokenizer:
return
lowercase =self.get_tokenizer()
lowercase =self.get_rust_tokenizer()
lowercase ='''I was born in 92000, and this is falsé.'''
lowercase =tokenizer.tokenize(snake_case_ )
lowercase =rust_tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
lowercase =tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
lowercase =rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
lowercase =self.get_rust_tokenizer()
lowercase =tokenizer.encode(snake_case_ )
lowercase =rust_tokenizer.encode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
@slow
def _A( self ):
lowercase ='''Hello World!'''
lowercase =[0, 3_53_78, 66_61, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(snake_case_ , self.big_tokenizer.encode(snake_case_ ) )
@slow
def _A( self ):
lowercase =(
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
lowercase =[
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(snake_case_ , self.big_tokenizer.encode(snake_case_ ) )
@slow
def _A( self ):
# fmt: off
lowercase ={'''input_ids''': [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name='''xlm-roberta-base''' , revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' , )
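
# A compact round-trip sketch of the tokenizer under test; the +1 shift between raw
# sentencepiece ids and model ids is the `fairseq_offset` asserted above. Requires
# downloading the public checkpoint.
if __name__ == "__main__":
    tok = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
    ids = tok("Hello World!")["input_ids"]
    assert ids == [0, 35378, 6661, 38, 2]  # matches the slow test above
    assert tok.decode(ids, skip_special_tokens=True) == "Hello World!"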
| 145 |
'''simple docstring'''
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    '''simple docstring'''
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    '''simple docstring'''
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    '''simple docstring'''
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    '''simple docstring'''
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)


if __name__ == "__main__":
    main()
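
# Worked example (key "KEY"): H+K -> R, E+E -> I, L+Y -> J, L+K -> V, O+E -> S, so:
#
#   >>> encrypt_message("KEY", "HELLO")
#   'RIJVS'
#   >>> decrypt_message("KEY", "RIJVS")
#   'HELLO'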
| 145 | 1 |
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self: Union[str, Any] , UpperCamelCase_: List[str] , UpperCamelCase_: Dict=100 , UpperCamelCase_: str=13 , UpperCamelCase_: List[str]=30 , UpperCamelCase_: List[Any]=2 , UpperCamelCase_: str=3 , UpperCamelCase_: List[Any]=True , UpperCamelCase_: int=True , UpperCamelCase_: Dict=32 , UpperCamelCase_: List[str]=4 , UpperCamelCase_: Union[str, Any]=4 , UpperCamelCase_: Dict=37 , UpperCamelCase_: str="gelu" , UpperCamelCase_: str=0.1 , UpperCamelCase_: Tuple=0.1 , UpperCamelCase_: Dict=10 , UpperCamelCase_: Optional[Any]=0.02 , UpperCamelCase_: Union[str, Any]=3 , UpperCamelCase_: List[Any]=None , UpperCamelCase_: Any=[0, 1, 2, 3] , ):
UpperCamelCase_ =parent
UpperCamelCase_ =100
UpperCamelCase_ =batch_size
UpperCamelCase_ =image_size
UpperCamelCase_ =patch_size
UpperCamelCase_ =num_channels
UpperCamelCase_ =is_training
UpperCamelCase_ =use_labels
UpperCamelCase_ =hidden_size
UpperCamelCase_ =num_hidden_layers
UpperCamelCase_ =num_attention_heads
UpperCamelCase_ =intermediate_size
UpperCamelCase_ =hidden_act
UpperCamelCase_ =hidden_dropout_prob
UpperCamelCase_ =attention_probs_dropout_prob
UpperCamelCase_ =type_sequence_label_size
UpperCamelCase_ =initializer_range
UpperCamelCase_ =scope
UpperCamelCase_ =out_indices
UpperCamelCase_ =num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase_ =(image_size // patch_size) ** 2
UpperCamelCase_ =num_patches + 1
def UpperCamelCase__ ( self: List[Any] ):
UpperCamelCase_ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase_ =None
UpperCamelCase_ =None
if self.use_labels:
UpperCamelCase_ =ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase_ =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCamelCase_ =self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase__ ( self: Any ):
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def UpperCamelCase__ ( self: Any , UpperCamelCase_: Any , UpperCamelCase_: Optional[int] , UpperCamelCase_: Tuple , UpperCamelCase_: Union[str, Any] ):
UpperCamelCase_ =BeitModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCamelCase_ =model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self: Union[str, Any] , UpperCamelCase_: Optional[int] , UpperCamelCase_: Any , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Union[str, Any] ):
UpperCamelCase_ =BeitForMaskedImageModeling(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCamelCase_ =model(UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def UpperCamelCase__ ( self: Optional[Any] , UpperCamelCase_: int , UpperCamelCase_: Optional[int] , UpperCamelCase_: List[Any] , UpperCamelCase_: Union[str, Any] ):
UpperCamelCase_ =self.type_sequence_label_size
UpperCamelCase_ =BeitForImageClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCamelCase_ =model(UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase_ =1
UpperCamelCase_ =BeitForImageClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCamelCase_ =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase_ =model(UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase__ ( self: List[Any] , UpperCamelCase_: str , UpperCamelCase_: Dict , UpperCamelCase_: str , UpperCamelCase_: Any ):
UpperCamelCase_ =self.num_labels
UpperCamelCase_ =BeitForSemanticSegmentation(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
UpperCamelCase_ =model(UpperCamelCase_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
UpperCamelCase_ =model(UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def UpperCamelCase__ ( self: str ):
UpperCamelCase_ =self.prepare_config_and_inputs()
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ =config_and_inputs
UpperCamelCase_ ={"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
__lowerCamelCase : Optional[Any] = (
{
"feature-extraction": BeitModel,
"image-classification": BeitForImageClassification,
"image-segmentation": BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__lowerCamelCase : Tuple = False
__lowerCamelCase : Optional[Any] = False
__lowerCamelCase : Optional[Any] = False
def UpperCamelCase__ ( self: Any ):
UpperCamelCase_ =BeitModelTester(self )
UpperCamelCase_ =ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=37 )
def UpperCamelCase__ ( self: List[str] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="BEiT does not use inputs_embeds" )
def UpperCamelCase__ ( self: Tuple ):
pass
@require_torch_multi_gpu
@unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def UpperCamelCase__ ( self: int ):
pass
def UpperCamelCase__ ( self: Tuple ):
UpperCamelCase_ , UpperCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ =model_class(UpperCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase_ =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase_ , nn.Linear ) )
def UpperCamelCase__ ( self: Tuple ):
UpperCamelCase_ , UpperCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ =model_class(UpperCamelCase_ )
UpperCamelCase_ =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_ =[*signature.parameters.keys()]
UpperCamelCase_ =["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
def UpperCamelCase__ ( self: int ):
UpperCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def UpperCamelCase__ ( self: List[Any] ):
UpperCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase_ )
def UpperCamelCase__ ( self: Any ):
UpperCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_ )
def UpperCamelCase__ ( self: List[Any] ):
UpperCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase_ )
def UpperCamelCase__ ( self: Optional[int] ):
if not self.model_tester.is_training:
return
UpperCamelCase_ , UpperCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ =True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(UpperCamelCase_ ), BeitForMaskedImageModeling]:
continue
UpperCamelCase_ =model_class(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.train()
UpperCamelCase_ =self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_ )
UpperCamelCase_ =model(**UpperCamelCase_ ).loss
loss.backward()
def UpperCamelCase__ ( self: Union[str, Any] ):
UpperCamelCase_ , UpperCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCamelCase_ =False
UpperCamelCase_ =True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(UpperCamelCase_ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
UpperCamelCase_ =model_class(UpperCamelCase_ )
model.gradient_checkpointing_enable()
model.to(UpperCamelCase_ )
model.train()
UpperCamelCase_ =self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_ )
UpperCamelCase_ =model(**UpperCamelCase_ ).loss
loss.backward()
    def test_initialization(self: Tuple):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
    @slow
    def test_model_from_pretrained(self: int):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self: str):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None
    @slow
    def test_inference_masked_image_modeling_head(self: Any):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)
        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))
    @slow
    def test_inference_image_classification_head_imagenet_1k(self: Optional[Any]):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
    @slow
    def test_inference_image_classification_head_imagenet_22k(self: Optional[int]):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21_841))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
    @slow
    def test_inference_semantic_segmentation(self: int):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)
        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)
        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)
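        # The expected values are duplicated below because Pillow 9.0 changed its default
        # resampling filters, which shifts interpolated pixel values slightly; this is our
        # reading of the version split, the original test does not state the reason.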
        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")
        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_post_processing_semantic_segmentation(self: Optional[int]):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)
        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)
        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
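    # For context (our reading of the two calls above, not part of the original test):
    # post_process_semantic_segmentation argmaxes the per-image (num_labels, h, w) logits;
    # when target_sizes is given the logits are first upsampled to each requested size,
    # hence one call yields a (500, 300) map and the other the raw (160, 160) map.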
"""simple docstring"""
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
    def setUp(self: int):
        super().setUp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "こんにちは",
            "こん",
            "にちは",
            "ばんは",
            "##こん",
            "##にちは",
            "##ばんは",
            "世界",
            "##世界",
            "、",
            "##、",
            "。",
            "##。",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts(self: Optional[int], tokenizer: Tuple):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text

    def get_clean_sequence(self: Tuple, tokenizer: Any):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_pretokenized_inputs(self: Optional[int]):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self: int):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self: Union[str, Any]):
        pass  # TODO add if relevant

    def test_full_tokenizer(self: Union[str, Any]):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。")
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])
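    # Quick usage sketch outside the test harness (the checkpoint name is taken from the
    # tests further below; the sentence is illustrative):
    #   from transformers import BertJapaneseTokenizer
    #   tokenizer = BertJapaneseTokenizer.from_pretrained("cl-tohoku/bert-base-japanese")
    #   tokenizer.tokenize("こんにちは、世界。")  # -> MeCab word split, then WordPiece subwords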
    def test_pickle_mecab_tokenizer(self: Dict):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="mecab")
        self.assertIsNotNone(tokenizer)
        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])
        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)
        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)
        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)
def UpperCamelCase__ ( self: int ):
UpperCamelCase_ =MecabTokenizer(mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def UpperCamelCase__ ( self: Optional[Any] ):
try:
UpperCamelCase_ =MecabTokenizer(mecab_dic="unidic_lite" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def UpperCamelCase__ ( self: List[str] ):
try:
UpperCamelCase_ =MecabTokenizer(mecab_dic="unidic" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def UpperCamelCase__ ( self: Tuple ):
UpperCamelCase_ =MecabTokenizer(do_lower_case=UpperCamelCase_ , mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def UpperCamelCase__ ( self: Dict ):
try:
UpperCamelCase_ =MecabTokenizer(
do_lower_case=UpperCamelCase_ , normalize_text=UpperCamelCase_ , mecab_option="-d /usr/local/lib/mecab/dic/jumandic" )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"] , )
def UpperCamelCase__ ( self: Optional[int] ):
UpperCamelCase_ =MecabTokenizer(normalize_text=UpperCamelCase_ , mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"] , )
@require_sudachi
def UpperCamelCase__ ( self: List[Any] ):
UpperCamelCase_ =self.tokenizer_class(self.vocab_file , word_tokenizer_type="sudachi" )
self.assertIsNotNone(UpperCamelCase_ )
UpperCamelCase_ ="こんにちは、世界。\nこんばんは、世界。"
UpperCamelCase_ =tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
UpperCamelCase_ =os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(UpperCamelCase_ , "wb" ) as handle:
pickle.dump(UpperCamelCase_ , UpperCamelCase_ )
with open(UpperCamelCase_ , "rb" ) as handle:
UpperCamelCase_ =pickle.load(UpperCamelCase_ )
UpperCamelCase_ =tokenizer_new.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
@require_sudachi
def UpperCamelCase__ ( self: Union[str, Any] ):
UpperCamelCase_ =SudachiTokenizer(sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
def UpperCamelCase__ ( self: Tuple ):
UpperCamelCase_ =SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="A" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国", "人", "参政", "権"] )
@require_sudachi
def UpperCamelCase__ ( self: List[str] ):
UpperCamelCase_ =SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="B" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人", "参政権"] )
@require_sudachi
def UpperCamelCase__ ( self: Tuple ):
UpperCamelCase_ =SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="C" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人参政権"] )
@require_sudachi
def UpperCamelCase__ ( self: int ):
UpperCamelCase_ =SudachiTokenizer(do_lower_case=UpperCamelCase_ , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
def UpperCamelCase__ ( self: Optional[int] ):
UpperCamelCase_ =SudachiTokenizer(normalize_text=UpperCamelCase_ , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "] , )
@require_sudachi
def UpperCamelCase__ ( self: List[Any] ):
UpperCamelCase_ =SudachiTokenizer(trim_whitespace=UpperCamelCase_ , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
@require_jumanpp
def UpperCamelCase__ ( self: Union[str, Any] ):
UpperCamelCase_ =self.tokenizer_class(self.vocab_file , word_tokenizer_type="jumanpp" )
self.assertIsNotNone(UpperCamelCase_ )
UpperCamelCase_ ="こんにちは、世界。\nこんばんは、世界。"
UpperCamelCase_ =tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
UpperCamelCase_ =os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(UpperCamelCase_ , "wb" ) as handle:
pickle.dump(UpperCamelCase_ , UpperCamelCase_ )
with open(UpperCamelCase_ , "rb" ) as handle:
UpperCamelCase_ =pickle.load(UpperCamelCase_ )
UpperCamelCase_ =tokenizer_new.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
@require_jumanpp
def UpperCamelCase__ ( self: Any ):
UpperCamelCase_ =JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def UpperCamelCase__ ( self: Any ):
UpperCamelCase_ =JumanppTokenizer(do_lower_case=UpperCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def UpperCamelCase__ ( self: int ):
UpperCamelCase_ =JumanppTokenizer(normalize_text=UpperCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def UpperCamelCase__ ( self: List[Any] ):
UpperCamelCase_ =JumanppTokenizer(trim_whitespace=UpperCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"] , )
@require_jumanpp
def UpperCamelCase__ ( self: List[Any] ):
UpperCamelCase_ =JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。" ) , ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"] , )
    def test_wordpiece_tokenizer(self: Optional[Any]):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こんにちは"])
        self.assertListEqual(tokenizer.tokenize("こんばんは"), ["こん", "##ばんは"])
        self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは"), ["こん", "##ばんは", "[UNK]", "こんにちは"])
    def test_sentencepiece_tokenizer(self: List[Any]):
        tokenizer = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
        subword_tokenizer = tokenizer.subword_tokenizer
        tokens = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。")
        self.assertListEqual(tokens, ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"])
        tokens = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは")
        self.assertListEqual(tokens, ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"])

    def test_sequence_builders(self: str):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese")
        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
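
# A minimal sketch of the greedy longest-match-first WordPiece lookup exercised by the
# WordpieceTokenizer test above (simplified: the real tokenizer also caps the input
# length and whitespace-splits the text first):
def simple_wordpiece(word, vocab, unk="[UNK]"):
    pieces, start = [], 0
    while start < len(word):
        end, cur = len(word), None
        while start < end:
            piece = word[start:end] if start == 0 else "##" + word[start:end]
            if piece in vocab:
                cur = piece
                break
            end -= 1
        if cur is None:
            return [unk]  # nothing matched: the whole word maps to [UNK]
        pieces.append(cur)
        start = end
    return pieces


# e.g. simple_wordpiece("こんばんは", {"こん", "##ばんは"}) -> ["こん", "##ばんは"]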
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False

    def setUp(self: Any):
        super().setUp()
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_tokenizer(self: Dict, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)

    def get_input_output_texts(self: Optional[int], tokenizer: Dict):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text
    def test_pretokenized_inputs(self: int):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self: Tuple):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self: Dict):
        pass  # TODO add if relevant

    def test_full_tokenizer(self: Optional[int]):
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character")
        tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
        self.assertListEqual(
            tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"]
        )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12]
        )
    def test_character_tokenizer(self: Tuple):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こ", "ん", "に", "ち", "は"])
        self.assertListEqual(tokenizer.tokenize("こんにちほ"), ["こ", "ん", "に", "ち", "[UNK]"])
    def test_sequence_builders(self: Optional[Any]):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")
        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    def test_tokenizer_bert_japanese(self: Dict):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)
class BertTokenizerMismatchTest(unittest.TestCase):
    def test_tokenizer_mismatch_warning(self: List[str]):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
        EXAMPLE_BERT_ID = "bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy the original FocalNet checkpoint weights into the HF model and verify the outputs."""
    # fmt: off
    model_name_to_url = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
    # fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])
    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="focalnet-tiny",
type=str,
help="Name of the FocalNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub.",
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
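    # Example invocation (a sketch; the flag names come from the parser above, the script
    # filename and paths are illustrative):
    #   python convert_focalnet_to_hf_format.py --model_name focalnet-tiny \
    #       --pytorch_dump_folder_path ./focalnet-tiny-hf --push_to_hub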
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_instructblip": [
"INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InstructBlipConfig",
"InstructBlipQFormerConfig",
"InstructBlipVisionConfig",
],
"processing_instructblip": ["InstructBlipProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
"INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"InstructBlipQFormerModel",
"InstructBlipPreTrainedModel",
"InstructBlipForConditionalGeneration",
"InstructBlipVisionModel",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
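
# For illustration only: a similar lazy-loading effect can be had with PEP 562
# module-level __getattr__ (a sketch, not the transformers _LazyModule implementation):
#
#   import importlib
#
#   def __getattr__(name):
#       for module_name, symbols in _import_structure.items():
#           if name in symbols:
#               module = importlib.import_module(f".{module_name}", __name__)
#               return getattr(module, name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")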
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")


@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self: str):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    @cached_property
    def _large_tokenizer(self: List[Any]):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self: Dict, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self: str, tokenizer: Tuple):
        return ("This is a test", "This is a test")
    def test_convert_token_and_id(self: Tuple):
        token = "</s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self: int):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1_103)

    def test_vocab_size(self: Optional[Any]):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_103)
    def test_mask_tokens_rust_pegasus(self: Union[str, Any]):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
    def test_large_mask_tokens(self: Optional[Any]):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1_971, 113, 1_679, 10_710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)

    def test_large_tokenizer_settings(self: str):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96_103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1_024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2_291, 1_971, 113, 1_679, 10_710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
    @require_torch
    def test_large_seq2seq_truncation(self: Optional[Any]):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )
        assert batch.input_ids.shape == (2, 1_024)
        assert batch.attention_mask.shape == (2, 1_024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
@slow
def lowerCAmelCase ( self : Tuple ):
"""simple docstring"""
_UpperCamelCase: Tuple = {"input_ids": [[38_979, 143, 18_485, 606, 130, 26_669, 87_686, 121, 54_189, 1_129, 111, 26_669, 87_686, 121, 9_114, 14_787, 121, 13_249, 158, 592, 956, 121, 14_621, 31_576, 143, 62_613, 108, 9_688, 930, 43_430, 11_562, 62_613, 304, 108, 11_443, 897, 108, 9_314, 17_415, 63_399, 108, 11_443, 7_614, 18_316, 118, 4_284, 7_148, 12_430, 143, 1_400, 25_703, 158, 111, 4_284, 7_148, 11_772, 143, 21_297, 1_064, 158, 122, 204, 3_506, 1_754, 1_133, 14_787, 1_581, 115, 33_224, 4_482, 111, 1_355, 110, 29_173, 317, 50_833, 108, 20_147, 94_665, 111, 77_198, 107, 1], [110, 62_613, 117, 638, 112, 1_133, 121, 20_098, 1_355, 79_050, 13_872, 135, 1_596, 53_541, 1_352, 141, 13_039, 5_542, 124, 302, 518, 111, 268, 2_956, 115, 149, 4_427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1_235, 2_799, 18_289, 17_780, 204, 109, 9_474, 1_296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self: Any):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self: Optional[int]):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self: List[Any], **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self: str, tokenizer: Dict):
        return ("This is a test", "This is a test")
    def test_mask_tokens_rust_pegasus(self: Union[str, Any]):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
    @require_torch
    def test_large_seq2seq_truncation(self: Tuple):
        src_texts = ["This is going to be way too long." * 1_000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )
        assert batch.input_ids.shape == (2, 4_096)
        assert batch.attention_mask.shape == (2, 4_096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
    def test_equivalence_to_orig_tokenizer(self: Dict):
        raw_input_str = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )
        token_ids = self._large_tokenizer(raw_input_str).input_ids
        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4_211, 120, 117, 263, 112, 804, 109, 856, 25_016, 3_137, 464, 109, 26_955, 3_137, 1],
        )
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self: Dict, **kwargs):
        config = {"num_train_timesteps": 1_000}
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self: List[str]):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self: Tuple):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self: int):
        for timesteps in [100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self: Any):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self: Optional[int]):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 2_540_529) < 10
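
# A minimal sketch of the config save/load round-trip these tests exercise
# (the directory path is illustrative):
#   from diffusers import IPNDMScheduler
#   scheduler = IPNDMScheduler(num_train_timesteps=1_000)
#   scheduler.save_config("/tmp/ipndm")                 # writes scheduler_config.json
#   restored = IPNDMScheduler.from_pretrained("/tmp/ipndm")
#   restored.set_timesteps(50)                          # same timestep schedule as the original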
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
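
# Note on the guarded import above: when torch/transformers are missing, the dummy
# ImageTextPipelineOutput / UniDiffuserPipeline objects are imported instead and raise a
# helpful error on first use, so `from diffusers import UniDiffuserPipeline` never fails
# at import time. Typical usage once the dependencies are installed (the checkpoint name
# is illustrative):
#   pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1")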
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    """Load a fairseq/metaseq OPT checkpoint and normalize its state dict keys."""
    sd = torch.load(checkpoint_path, map_location='cpu')
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location='cpu')['model']

    # pop unnecessary weights
    keys_to_delete = [
        'decoder.version',
        'decoder.output_projection.weight',
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        'decoder.project_in_dim.weight': 'decoder.project_in.weight',
        'decoder.project_out_dim.weight': 'decoder.project_out.weight',
        'decoder.layer_norm.weight': 'decoder.final_layer_norm.weight',
        'decoder.layer_norm.bias': 'decoder.final_layer_norm.bias',
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace('.qkv_proj.', '.q_proj.')
            k_name = key.replace('.qkv_proj.', '.k_proj.')
            v_name = key.replace('.qkv_proj.', '.v_proj.')
            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)
            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)
    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()
    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)
    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fairseq_path''',
type=str,
help=(
'''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'''
''' https://huggingface.co/models?other=opt_metasq'''
),
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''')
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
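    # Example invocation (a sketch; the flag names come from the parser above, the script
    # filename and paths are illustrative):
    #   python convert_opt_original_checkpoint_to_pytorch.py \
    #       --fairseq_path ./opt-125m/model.pt --pytorch_dump_folder_path ./opt-125m-hf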
import math
def jump_search(arr: list, x: int) -> int:
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
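
# Worked example: in [0, 1, 2, 8, 13, 21] with x = 8 the block size is floor(sqrt(6)) = 2,
# so the first loop probes indices 1 and 3; arr[3] == 8 stops it, the linear scan starts at
# prev = 2 and returns index 3. Overall cost is O(sqrt(n)) comparisons on a sorted array.
assert jump_search([0, 1, 2, 8, 13, 21], 8) == 3  # sanity check on illustrative data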
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
    x = int(input('Enter the number to be searched:\n'))
    res = jump_search(arr, x)
if res == -1:
print('Number not found!')
else:
print(F"Number {x} is at index {res}")
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"spm_file": "sentencepiece.bpe.model",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
),
},
"spm_file": {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
)
},
}
MAX_MODEL_INPUT_SIZES = {
"facebook/s2t-small-librispeech-asr": 1_0_2_4,
}
MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        do_upper_case=False,
        do_lower_case=False,
        tgt_lang=None,
        lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            do_upper_case=do_upper_case,
            do_lower_case=do_lower_case,
            tgt_lang=tgt_lang,
            lang_codes=lang_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}
            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string( self , tokens : List[str] ) -> str:
        '''simple docstring'''
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens )
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        decoded = self.sp_model.decode(current_sub_tokens )
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        '''simple docstring'''
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]
    def get_special_tokens_mask( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0 )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones
    def get_vocab( self ) -> Dict:
        '''simple docstring'''
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ) -> Dict:
        '''simple docstring'''
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state
    def __setstate__( self , d : Dict ) -> None:
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file , self.sp_model_kwargs )
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        save_dir = Path(save_directory )
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
        )
        spm_save_path = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
        )
        save_json(self.encoder , vocab_save_path )
        if os.path.abspath(self.spm_file ) != os.path.abspath(spm_save_path ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file , spm_save_path )
        elif not os.path.isfile(self.spm_file ):
            with open(spm_save_path , 'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (str(vocab_save_path ), str(spm_save_path ))
def load_spm( path : str , sp_model_kwargs : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
    """simple docstring"""
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def load_json( path : str ) -> Union[Dict, List]:
    """simple docstring"""
    with open(path , 'r' ) as f:
        return json.load(f )
def save_json( data , path : str ) -> None:
    """simple docstring"""
    with open(path , 'w' ) as f:
        json.dump(data , f , indent=2 )
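# Minimal round-trip sketch for the JSON helpers above; the temporary path is
# purely illustrative and not part of the original module:
if __name__ == "__main__":
    import tempfile
    _tmp_vocab = os.path.join(tempfile.mkdtemp() , 'vocab.json' )
    save_json({'<unk>': 0, 'hello': 1} , _tmp_vocab )
    assert load_json(_tmp_vocab ) == {'<unk>': 0, 'hello': 1}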
| 405 | 0 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
    },
    """merges_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
    },
    """tokenizer_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """allenai/led-base-16384""": 1_6384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode( ):
    bs = (
        list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(c ) for c in cs]
    return dict(zip(bs , cs ) )
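# Sanity-check sketch for the byte-to-unicode table above: all 256 byte values map
# to printable characters, with printable ASCII mapping to itself.
# >>> table = bytes_to_unicode()
# >>> table[ord('A')]
# 'A'
# >>> len(table)
# 256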
def get_pairs( word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
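# Example: get_pairs(("l", "o", "w")) returns {("l", "o"), ("o", "w")}.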
class lowerCAmelCase__ ( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__( self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ):
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding='utf-8' ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding='utf-8' ) as merges_handle:
            bpe_merges = merges_handle.read().split('\n' )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size( self ):
        return len(self.encoder )
    def get_vocab( self ):
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            # merge the pair with the smallest learned rank first; unseen pairs rank +inf
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('inf' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = ' '.join(word )
        self.cache[token] = word
        return word
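    # Worked example of the merge loop above, assuming hypothetical ranks
    # bpe_ranks = {("l", "o"): 0, ("lo", "w"): 1}:
    #   "low" -> ("l", "o", "w") -> ("lo", "w") -> ("low",) -> cached as "low"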
    def _tokenize( self , text ):
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(' ' ) )
        return bpe_tokens
    def _convert_token_to_id( self , token ):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        return self.decoder.get(index )
    def convert_tokens_to_string( self , tokens ):
        text = ''.join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
        return text
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
        with open(vocab_file , 'w' , encoding='utf-8' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '\n' )
        index = 0
        with open(merge_file , 'w' , encoding='utf-8' ) as writer:
            writer.write('#version: 0.2\n' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        ' Please check that the tokenizer is not corrupted!' )
                    index = token_index
                writer.write(' '.join(bpe_tokens ) + '\n' )
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def prepare_for_tokenization( self , text , is_split_into_words=False , **kwargs ):
        add_prefix_space = kwargs.pop('add_prefix_space' , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)
    def _pad( self , encoded_inputs , max_length = None , padding_strategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of = None , return_attention_mask = None , ):
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = 'attention_mask' in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs['global_attention_mask'] ) != len(required_input )
            if needs_to_be_padded:
                difference = len(required_input ) - len(encoded_inputs['global_attention_mask'] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs['global_attention_mask'] = (
                        encoded_inputs['global_attention_mask'] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs['global_attention_mask'] = [-1] * difference + encoded_inputs[
                        'global_attention_mask'
                    ]
                else:
                    raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
        return encoded_inputs
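# Padding note: in `global_attention_mask`, 1 marks tokens that attend globally and
# 0 marks local-attention tokens, so padded positions are extended with -1 above to
# stay distinguishable from both.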
| 718 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class lowerCAmelCase__ ( PretrainedConfig ):
    '''simple docstring'''
    model_type = """realm"""
    def __init__( self , vocab_size=30522 , hidden_size=768 , retriever_proj_size=128 , num_hidden_layers=12 , num_attention_heads=12 , num_candidates=8 , intermediate_size=3072 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , span_hidden_size=256 , max_span_width=10 , reader_layer_norm_eps=1E-3 , reader_beam_size=5 , reader_seq_len=320 , num_block_records=13353718 , searcher_beam_size=5000 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len
        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
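    # Note: when REALM runs in eval mode, searcher_beam_size is set equal to
    # reader_beam_size; the larger default above (5000) only applies during training.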
| 492 | 0 |
"""simple docstring"""
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
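# FeaturesManager.determine_framework resolves which backend to export from: an
# explicit framework argument wins, then the checkpoint format found on disk, then
# whichever library is importable in the environment (PyTorch preferred when both are).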
@require_torch
@require_tf
class lowerCAmelCase ( TestCase ):
    """simple docstring"""
    def setUp( self ):
        '''simple docstring'''
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = '''pt'''
        self.framework_tf = '''tf'''
    def _setup_pt_ckpt( self , model_path ):
        '''simple docstring'''
        model_pt = AutoModel.from_pretrained(self.test_model )
        model_pt.save_pretrained(model_path )
    def _setup_tf_ckpt( self , model_path ):
        '''simple docstring'''
        model_tf = TFAutoModel.from_pretrained(self.test_model , from_pt=True )
        model_tf.save_pretrained(model_path )
    def test_framework_provided( self ):
        '''simple docstring'''
        mock_framework = '''mock_framework'''
        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model , mock_framework )
        self.assertEqual(framework , mock_framework )
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            framework = FeaturesManager.determine_framework(local_pt_ckpt , mock_framework )
            self.assertEqual(framework , mock_framework )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            framework = FeaturesManager.determine_framework(local_tf_ckpt , mock_framework )
            self.assertEqual(framework , mock_framework )
    def test_checkpoint_provided( self ):
        '''simple docstring'''
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            framework = FeaturesManager.determine_framework(local_pt_ckpt )
            self.assertEqual(framework , self.framework_pt )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            framework = FeaturesManager.determine_framework(local_tf_ckpt )
            self.assertEqual(framework , self.framework_tf )
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError ):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt )
    def test_from_environment( self ):
        '''simple docstring'''
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False )
        with patch('''transformers.onnx.features.is_tf_available''' , mock_tf_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_pt )
        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False )
        with patch('''transformers.onnx.features.is_torch_available''' , mock_torch_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_tf )
        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True )
        mock_torch_available = MagicMock(return_value=True )
        with patch('''transformers.onnx.features.is_tf_available''' , mock_tf_available ), patch(
            '''transformers.onnx.features.is_torch_available''' , mock_torch_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_pt )
        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False )
        mock_torch_available = MagicMock(return_value=False )
        with patch('''transformers.onnx.features.is_tf_available''' , mock_tf_available ), patch(
            '''transformers.onnx.features.is_torch_available''' , mock_torch_available ):
            with self.assertRaises(EnvironmentError ):
                framework = FeaturesManager.determine_framework(self.test_model )
| 142 |
"""simple docstring"""
import os
def solution():
    script_dir = os.path.dirname(os.path.realpath(__file__ ) )
    triangle_path = os.path.join(script_dir , '''triangle.txt''' )
    with open(triangle_path ) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(''' ''' ):
            numbers_from_line.append(int(number ) )
        a.append(numbers_from_line )
    for i in range(1 , len(a ) ):
        for j in range(len(a[i] ) ):
            number1 = a[i - 1][j] if j != len(a[i - 1] ) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1 , number2 )
    return max(a[-1] )
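# Worked micro-example of the bottom-up pass above on a 3-row triangle:
#   3           3            3
#   7 4    ->   10 7    ->   10 7
#   2 4 6       2 4 6        12 14 13     => max of last row = 14 (path 3 -> 7 -> 4)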
if __name__ == "__main__":
print(solution())
| 142 | 1 |
"""simple docstring"""
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
    SecondaryLearner,
    collect_objective_set,
    compute_perplexity,
    generate_datasets,
    load_gpt2,
    recopy_gpt2,
    set_seed,
    train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
def __UpperCamelCase ( SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=1_00 , SCREAMING_SNAKE_CASE=10_26 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE="data/tokenized_stories_train_wikitext103.jbl" , SCREAMING_SNAKE_CASE="igf_context_pairs.jbl" , ) -> Optional[Any]:
"""simple docstring"""
set_seed(3 )
# generate train_data and objective_set
__snake_case , __snake_case = generate_datasets(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , number=SCREAMING_SNAKE_CASE , min_len=10_26 , trim=SCREAMING_SNAKE_CASE )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
__snake_case = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
# load pretrained model
    __snake_case = load_gpt2("gpt2" ).to(SCREAMING_SNAKE_CASE )
print("computing perplexity on objective set" )
__snake_case = compute_perplexity(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).item()
print("perplexity on objective set:" , SCREAMING_SNAKE_CASE )
# collect igf pairs and save to file demo.jbl
collect_objective_set(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def __UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=15 , SCREAMING_SNAKE_CASE=1_28 , SCREAMING_SNAKE_CASE=1_00 , SCREAMING_SNAKE_CASE="igf_model.pt" , ) -> int:
"""simple docstring"""
set_seed(42 )
# Load pre-trained model
    __snake_case = GPT2LMHeadModel.from_pretrained("gpt2" )
# Initialize secondary learner to use embedding weights of model
__snake_case = SecondaryLearner(SCREAMING_SNAKE_CASE )
# Train secondary learner
__snake_case = train_secondary_learner(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , max_epochs=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , eval_freq=1_00 , igf_model_path=SCREAMING_SNAKE_CASE , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
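# The secondary learner trained above scores candidate contexts; during fine-tuning
# only contexts whose predicted information gain clears a (decaying) threshold are
# kept for backpropagation.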
def __UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=10_00 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=recopy_gpt2 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE="gpt2_finetuned.pt" , ) -> Union[str, Any]:
"""simple docstring"""
__snake_case = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
__snake_case = RandomSampler(SCREAMING_SNAKE_CASE )
__snake_case = DataLoader(SCREAMING_SNAKE_CASE , sampler=SCREAMING_SNAKE_CASE )
__snake_case = max_steps // (len(SCREAMING_SNAKE_CASE )) + 1
__snake_case = 0
__snake_case = torch.zeros((1, context_len) , dtype=torch.long , device=SCREAMING_SNAKE_CASE )
__snake_case , __snake_case , __snake_case = recopy_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
model.train()
if secondary_learner is not None:
secondary_learner.to(SCREAMING_SNAKE_CASE )
secondary_learner.eval()
__snake_case = []
__snake_case = 0
__snake_case = []
__snake_case = []
# Compute the performance of the transformer model at the beginning
__snake_case = compute_perplexity(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
test_perps.append(SCREAMING_SNAKE_CASE )
print("Test perplexity, step" , SCREAMING_SNAKE_CASE , ":" , SCREAMING_SNAKE_CASE )
for epoch in range(int(SCREAMING_SNAKE_CASE ) ):
for step, example in enumerate(SCREAMING_SNAKE_CASE ):
torch.cuda.empty_cache()
__snake_case = random.randint(0 , example.size(2 ) - context_len - 1 )
__snake_case = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
__snake_case = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
__snake_case = True
if secondary_learner is not None:
__snake_case = secondary_learner.forward(
torch.tensor(SCREAMING_SNAKE_CASE , dtype=torch.long , device=SCREAMING_SNAKE_CASE ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(SCREAMING_SNAKE_CASE ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
__snake_case = -1
if predicted_q < threshold:
__snake_case = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
__snake_case = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
__snake_case = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
__snake_case = compute_perplexity(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
test_perps.append(SCREAMING_SNAKE_CASE )
print("Test perplexity, step" , SCREAMING_SNAKE_CASE , ":" , SCREAMING_SNAKE_CASE )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , SCREAMING_SNAKE_CASE )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def __UpperCamelCase ( ) -> Any:
"""simple docstring"""
__snake_case = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task" )
# Required parameters
parser.add_argument(
"--data_dir" , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help="The input data dir. Should contain data files for WikiText." , )
parser.add_argument(
"--model_name_or_path" , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--data_file" , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help=(
"A jbl file containing tokenized data which can be split as objective dataset, "
"train_dataset and test_dataset."
) , )
parser.add_argument(
"--igf_data_file" , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help="A jbl file containing the context and information gain pairs to train secondary learner." , )
parser.add_argument(
"--output_dir" , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help="The output directory where the final fine-tuned model is stored." , )
parser.add_argument(
"--tokenizer_name" , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , help="Pretrained tokenizer name or path if not the same as model_name" , )
parser.add_argument("--seed" , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help="A seed for reproducible training." )
parser.add_argument(
"--context_len" , default=32 , type=SCREAMING_SNAKE_CASE , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--size_objective_set" , default=1_00 , type=SCREAMING_SNAKE_CASE , help="number of articles that are long enough to be used as our objective set" , )
parser.add_argument(
"--eval_freq" , default=1_00 , type=SCREAMING_SNAKE_CASE , help="secondary model evaluation is triggered at eval_freq" )
parser.add_argument("--max_steps" , default=10_00 , type=SCREAMING_SNAKE_CASE , help="To calculate training epochs" )
parser.add_argument(
"--secondary_learner_batch_size" , default=1_28 , type=SCREAMING_SNAKE_CASE , help="batch size of training data for secondary learner" , )
parser.add_argument(
"--batch_size" , default=16 , type=SCREAMING_SNAKE_CASE , help="batch size of training data of language model(gpt2) " )
parser.add_argument(
"--eval_interval" , default=10 , type=SCREAMING_SNAKE_CASE , help=(
"decay the selectivity of our secondary learner filter from"
"1 standard deviation above average to 1 below average after 10 batches"
) , )
parser.add_argument(
"--number" , default=1_00 , type=SCREAMING_SNAKE_CASE , help="The number of examples split to be used as objective_set/test_data" )
parser.add_argument(
"--min_len" , default=10_26 , type=SCREAMING_SNAKE_CASE , help="The minimum length of the article to be used as objective set" )
parser.add_argument(
"--secondary_learner_max_epochs" , default=15 , type=SCREAMING_SNAKE_CASE , help="number of epochs to train secondary learner" )
parser.add_argument("--trim" , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , help="truncate the example if it exceeds context length" )
parser.add_argument(
"--threshold" , default=1.0 , type=SCREAMING_SNAKE_CASE , help=(
"The threshold value used by secondary learner to filter the train_data and allow only"
" informative data as input to the model"
) , )
parser.add_argument("--finetuned_model_name" , default="gpt2_finetuned.pt" , type=SCREAMING_SNAKE_CASE , help="finetuned_model_name" )
parser.add_argument(
"--recopy_model" , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , help="Reset the model to the original pretrained GPT-2 weights after each iteration" , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=1_00 , min_len=10_26 , trim=SCREAMING_SNAKE_CASE , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , )
# Load train data for secondary learner
__snake_case = joblib.load("data/IGF_values.jbl" )
# Train secondary learner
__snake_case = training_secondary_learner(
SCREAMING_SNAKE_CASE , secondary_learner_max_epochs=15 , secondary_learner_batch_size=1_28 , eval_freq=1_00 , igf_model_path="igf_model.pt" , )
# load pretrained gpt2 model
    __snake_case = GPT2LMHeadModel.from_pretrained("gpt2" )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
__snake_case , __snake_case = generate_datasets(
context_len=32 , file="data/tokenized_stories_train_wikitext103.jbl" , number=1_00 , min_len=10_26 , trim=SCREAMING_SNAKE_CASE )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , context_len=32 , max_steps=10_00 , batch_size=16 , threshold=1.0 , recopy_model=SCREAMING_SNAKE_CASE , secondary_learner=SCREAMING_SNAKE_CASE , eval_interval=10 , finetuned_model_name="gpt2_finetuned.pt" , )
if __name__ == "__main__":
main()
| 614 |
"""simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( task , reset_position_index_per_cell , tf_checkpoint_path , tapas_config_file , pytorch_dump_path ) -> None:
    """simple docstring"""
    config = TapasConfig.from_json_file(tapas_config_file )
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config )
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664_694
        config.cell_selection_preference = 0.207_951
        config.huber_loss_delta = 0.121_194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0_352_513
        model = TapasForQuestionAnswering(config=config )
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4_519
        config.cell_selection_preference = 0.903_421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763_141
        model = TapasForQuestionAnswering(config=config )
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config )
    elif task == "MLM":
        model = TapasForMaskedLM(config=config )
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config )
    else:
        raise ValueError(F'''Task {task} not supported.''' )
    print(F'''Building PyTorch model from configuration: {config}''' )
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model , config , tf_checkpoint_path )
    # Save pytorch-model (weights and configuration)
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    model.save_pretrained(pytorch_dump_path )
    # Save tokenizer files
    print(F'''Save tokenizer files to {pytorch_dump_path}''' )
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt" , model_max_length=5_12 )
    tokenizer.save_pretrained(pytorch_dump_path )
    print("Used relative position embeddings:" , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 614 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_whisper": ["WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", "WhisperConfig", "WhisperOnnxConfig"],
"feature_extraction_whisper": ["WhisperFeatureExtractor"],
"processing_whisper": ["WhisperProcessor"],
"tokenization_whisper": ["WhisperTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : List[Any] = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
"WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
"WhisperForConditionalGeneration",
"WhisperModel",
"WhisperPreTrainedModel",
"WhisperForAudioClassification",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
"TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFWhisperForConditionalGeneration",
"TFWhisperModel",
"TFWhisperPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
"FlaxWhisperForConditionalGeneration",
"FlaxWhisperModel",
"FlaxWhisperPreTrainedModel",
"FlaxWhisperForAudioClassification",
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
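# Registering a _LazyModule defers the heavy torch/TF/Flax imports declared above
# until an attribute is first accessed, so importing the package stays cheap when
# only one backend is installed.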
| 261 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester :
"""simple docstring"""
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = 'gelu'
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , attention_window=4 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
        inputs_dict = prepare_led_inputs_dict(config , input_ids , decoder_input_ids )
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids )[:, :-1], tf.ones_like(input_ids )[:, -1:]] , axis=-1 , )
        inputs_dict['global_attention_mask'] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs( self , config , inputs_dict ):
        """simple docstring"""
        model = TFLEDModel(config=config ).get_decoder()
        input_ids = inputs_dict['''input_ids''']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['''attention_mask'''][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , use_cache=True )
        output , past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice , output_from_no_past_slice , rtol=1E-3 )
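    # The slice comparison above checks that incremental decoding with cached
    # past_key_values matches a full forward pass over the concatenated sequence.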
def prepare_led_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
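# These default masks mirror what the tokenizer would produce: attend everywhere
# except pad tokens, and keep the decoder mask aligned with the shifted decoder inputs.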
@require_tf
class snake_case ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            'conversational': TFLEDForConditionalGeneration,
            'feature-extraction': TFLEDModel,
            'summarization': TFLEDForConditionalGeneration,
            'text2text-generation': TFLEDForConditionalGeneration,
            'translation': TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = TFLEDModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LEDConfig )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
snake_case__ ,snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Dict = tf.zeros_like(inputs_dict['''attention_mask'''] )
snake_case__ : Union[str, Any] = 2
snake_case__ : Tuple = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , )
snake_case__ : Optional[Any] = True
snake_case__ : Optional[int] = self.model_tester.seq_length
snake_case__ : str = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(lowerCamelCase ):
snake_case__ : Optional[Any] = outputs.decoder_attentions
self.assertEqual(len(lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(lowerCamelCase ):
snake_case__ : Union[str, Any] = [t.numpy() for t in outputs.encoder_attentions]
snake_case__ : Optional[Any] = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
snake_case__ : Dict = True
snake_case__ : int = False
snake_case__ : Optional[int] = False
snake_case__ : Optional[int] = model_class(lowerCamelCase )
snake_case__ : Any = model(self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
snake_case__ : Dict = len(lowerCamelCase )
self.assertEqual(config.output_hidden_states , lowerCamelCase )
check_encoder_attentions_output(lowerCamelCase )
if self.is_encoder_decoder:
snake_case__ : List[str] = model_class(lowerCamelCase )
snake_case__ : Optional[int] = model(self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
self.assertEqual(config.output_hidden_states , lowerCamelCase )
check_decoder_attentions_output(lowerCamelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
snake_case__ : Union[str, Any] = True
snake_case__ : str = model_class(lowerCamelCase )
snake_case__ : Union[str, Any] = model(self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
self.assertEqual(config.output_hidden_states , lowerCamelCase )
check_encoder_attentions_output(lowerCamelCase )
# Check attention is always last and order is fine
snake_case__ : Tuple = True
snake_case__ : Any = True
snake_case__ : Any = model_class(lowerCamelCase )
snake_case__ : Any = model(self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowerCamelCase ) )
self.assertEqual(model.config.output_hidden_states , lowerCamelCase )
check_encoder_attentions_output(lowerCamelCase )
@unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' )
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
pass
def lowercase__ ( self ) -> int:
"""simple docstring"""
pass
def _long_tensor( inputs ):
    return tf.constant(inputs , dtype=tf.int32 )
_lowerCAmelCase : Tuple = 1E-4
@slow
@require_tf
class snake_case ( unittest.TestCase ):
"""simple docstring"""
    def test_inference_no_head( self ):
        """simple docstring"""
        model = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
        inputs_dict = prepare_led_inputs_dict(model.config , input_ids , decoder_input_ids )
        output = model(**inputs_dict )[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape , expected_shape )
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-3 )
    def test_inference_with_head( self ):
        """simple docstring"""
        model = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' )
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
        inputs_dict = prepare_led_inputs_dict(model.config , input_ids , decoder_input_ids )
        output = model(**inputs_dict )[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape , expected_shape )
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-3 , rtol=1E-3 )
| 261 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue :
    def __init__( self ):
        self.data = []
        self.head = 0
        self.tail = 0
    def is_empty( self ):
        return self.head == self.tail
    def push( self , data ):
        self.data.append(data )
        self.tail = self.tail + 1
    def pop( self ):
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret
    def count( self ):
        return self.tail - self.head
    def print_queue( self ):
        print(self.data )
        print("""**************""" )
        print(self.data[self.head : self.tail] )
class MyNode :
    def __init__( self , data ):
        self.data = data
        self.left = None
        self.right = None
        self.height = 1
    def get_data( self ):
        return self.data
    def get_left( self ):
        return self.left
    def get_right( self ):
        return self.right
    def get_height( self ):
        return self.height
    def set_data( self , data ):
        self.data = data
    def set_left( self , node ):
        self.left = node
    def set_right( self , node ):
        self.right = node
    def set_height( self , height ):
        self.height = height
def get_height( node ):
    if node is None:
        return 0
    return node.get_height()
def my_max( a , b ):
    if a > b:
        return a
    return b
def right_rotation( node ):
    print("""left rotation node:""" , node.get_data() )
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right() )
    ret.set_right(node )
    h1 = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
    node.set_height(h1 )
    h2 = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
    ret.set_height(h2 )
    return ret
def left_rotation( node ):
    print("""right rotation node:""" , node.get_data() )
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left() )
    ret.set_left(node )
    h1 = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
    node.set_height(h1 )
    h2 = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
    ret.set_height(h2 )
    return ret
def lr_rotation( node ):
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child ) )
    return right_rotation(node )
def rl_rotation( node ):
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child ) )
    return left_rotation(node )
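# Which rotation restores balance depends on where the insertion landed:
#   left-left   -> right_rotation      left-right -> lr_rotation
#   right-right -> left_rotation       right-left -> rl_rotation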
def insert_node( node , data ):
    if node is None:
        return MyNode(data )
    if data < node.get_data():
        node.set_left(insert_node(node.get_left() , data ) )
        if (
            get_height(node.get_left() ) - get_height(node.get_right() ) == 2
        ): # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ): # new node is the left child of the left child
                node = right_rotation(node )
            else:
                node = lr_rotation(node )
    else:
        node.set_right(insert_node(node.get_right() , data ) )
        if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node )
            else:
                node = left_rotation(node )
    h1 = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
    node.set_height(h1 )
    return node
def get_right_most( root ):
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()
def get_left_most( root ):
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()
def del_node( root , data ):
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child )
            root.set_data(temp_data )
            root.set_right(del_node(right_child , temp_data ) )
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("""No such data""" )
            return root
        else:
            root.set_left(del_node(left_child , data ) )
    else: # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child , data ) )
    if get_height(right_child ) - get_height(left_child ) == 2:
        assert right_child is not None
        if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
            root = left_rotation(root )
        else:
            root = rl_rotation(root )
    elif get_height(right_child ) - get_height(left_child ) == -2:
        assert left_child is not None
        if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
            root = right_rotation(root )
        else:
            root = lr_rotation(root )
    height = my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1
    root.set_height(height )
    return root
class AVLtree:
    def __init__(self):
        self.root = None

    def get_height(self):
        return get_height(self.root)

    def insert(self, data):
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data):
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)

    def __str__(self):  # a level traversal, gives a more intuitive look on the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, i) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output
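
# --- Editor's addition: a minimal deterministic sketch, not part of the
# original module. Inserting the ascending run 0, 1, 2 triggers the RR case
# and a left rotation at the root, so the rebalanced tree has height 2, not 3.
def _rotation_demo():
    tree = AVLtree()
    for value in (0, 1, 2):
        tree.insert(value)  # prints "left rotation node: 0" on the last insert
    assert tree.get_height() == 2
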
def _test():
    import doctest

    doctest.testmod()


if __name__ == "__main__":
    _test()
    t = AVLtree()
    lst = list(range(10))
    random.shuffle(lst)
    for i in lst:
        t.insert(i)
    print(str(t))
    random.shuffle(lst)
    for i in lst:
        t.del_node(i)
    print(str(t))
"""
Project Euler Problem 234: https://projecteuler.net/problem=234

For an integer n >= 4, let lps(n) be the largest prime <= sqrt(n) and ups(n)
the smallest prime >= sqrt(n). n is called semidivisible if exactly one of
lps(n) and ups(n) divides n. Sum the semidivisible numbers up to the limit.
"""
import math


def prime_sieve(n):
    """Return all primes below ``n`` (sieve of Eratosthenes over the odd numbers)."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(limit=999_966_663_333):
    """Sum the semidivisible numbers up to ``limit``."""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
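
# --- Editor's sanity-check sketch (not part of the original solution):
# ``prime_sieve`` should reproduce the primes below 30.
def _check_sieve():
    assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
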
if __name__ == "__main__":
print(solution())
import contextlib
from multiprocessing import Pool, RLock

from tqdm.auto import tqdm

from ..utils import experimental, logging


logger = logging.get_logger(__name__)


class ParallelBackendConfig:
    backend_name = None


@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Apply ``function`` to the elements of ``iterable`` in parallel, using either a
    multiprocessing pool or a joblib backend depending on the current configuration."""
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )

    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)


def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselves (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")

    return mapped


def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )


@experimental
@contextlib.contextmanager
def parallel_backend(backend_name):
    """Context manager that switches ``parallel_map`` from multiprocessing to the
    given joblib backend (e.g. "spark") for the duration of the ``with`` block."""
    ParallelBackendConfig.backend_name = backend_name

    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()

        # TODO: call create_cache_and_write_probe if "download" in steps
        # TODO: raise NotImplementedError when Dataset.map etc is called

    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
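
# --- Editor's usage sketch (hypothetical; these helpers are normally invoked
# internally by `datasets`, e.g. from ``map_nested``):
#
#     with parallel_backend("spark"):
#         ...  # parallel_map calls inside this block are dispatched via joblib
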
import warnings
from typing import List

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
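
# --- Editor's usage sketch (assumes the public "google/owlvit-base-patch32"
# checkpoint; not part of the original module):
#
#     from transformers import OwlViTProcessor
#
#     processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     inputs = processor(
#         text=[["a photo of a cat", "a photo of a dog"]],
#         images=image,
#         return_tensors="pt",
#     )
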
def prime_sieve_eratosthenes(num):
    """Return all primes up to and including ``num`` using the sieve of Eratosthenes."""
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]
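
# Example (editor's addition): prime_sieve_eratosthenes(10) returns [2, 3, 5, 7].
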
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE__ = int(input("""Enter a positive integer: """).strip())
print(prime_sieve_eratosthenes(user_num))
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}


class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers


class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
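
# --- Editor's sanity sketch (uses only the defaults defined above):
#
#     config = UMT5Config()
#     assert config.hidden_size == config.d_model == 512
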
from __future__ import annotations

import time

Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right


class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)

        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            successors = self.get_successors(current_node)

            for node in successors:
                self.node_queue.append(node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest

    doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time

    print("Bidirectional BFS computation time : ", bd_bfs_time)
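
    # Editor's sanity check (addition): the goal is reachable on this grid, so
    # both returned paths should run from `init` to `goal`.
    assert path is not None and path[0] == init and path[-1] == goal
    assert bd_path is not None and bd_path[0] == init and bd_path[-1] == goal
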
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
A : Dict = get_logger(__name__)
A : Dict = R"\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n"
class _UpperCamelCase :
'''simple docstring'''
@add_start_docstrings(__a )
def __call__( self , __a , __a ):
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
class _UpperCamelCase :
'''simple docstring'''
@add_start_docstrings(__a )
def __call__( self , __a , __a ):
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
@add_start_docstrings(__a )
def __call__( self , __a , __a , __a , **__a ):
for processor in self:
__lowerCAmelCase = inspect.signature(processor.__call__ ).parameters
if len(__a ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
f"Make sure that all the required parameters: {list(function_args.keys() )} for "
f"{processor.__class__} are passed to the logits processor." )
__lowerCAmelCase = processor(__a , __a , __a , **__a )
else:
__lowerCAmelCase = processor(__a , __a , __a )
return scores
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , __a ):
if not isinstance(__a , __a ) or not (temperature > 0):
raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}" )
__lowerCAmelCase = temperature
def __call__( self , __a , __a , __a ):
__lowerCAmelCase = scores / self.temperature
return scores
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , __a , __a = -float("Inf" ) , __a = 1 ):
if not isinstance(__a , __a ) or (top_p < 0 or top_p > 1.0):
raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}" )
if not isinstance(__a , __a ) or (min_tokens_to_keep < 1):
raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}" )
__lowerCAmelCase = top_p
__lowerCAmelCase = filter_value
__lowerCAmelCase = min_tokens_to_keep
def __call__( self , __a , __a , __a ):
__lowerCAmelCase , __lowerCAmelCase = lax.top_k(__a , scores.shape[-1] )
__lowerCAmelCase = jnp.full_like(__a , self.filter_value )
__lowerCAmelCase = jax.nn.softmax(__a , axis=-1 ).cumsum(axis=-1 )
__lowerCAmelCase = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
__lowerCAmelCase = jnp.roll(__a , 1 )
score_mask |= score_mask.at[:, 0].set(__a )
# min tokens to keep
__lowerCAmelCase = score_mask.at[:, : self.min_tokens_to_keep].set(__a )
__lowerCAmelCase = jnp.where(__a , __a , __a )
__lowerCAmelCase = jax.lax.sort_key_val(__a , __a )[-1]
return next_scores
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , __a , __a = -float("Inf" ) , __a = 1 ):
if not isinstance(__a , __a ) or top_k <= 0:
raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}" )
__lowerCAmelCase = max(__a , __a )
__lowerCAmelCase = filter_value
def __call__( self , __a , __a , __a ):
__lowerCAmelCase , __lowerCAmelCase = scores.shape
__lowerCAmelCase = jnp.full(batch_size * vocab_size , self.filter_value )
__lowerCAmelCase = min(self.top_k , scores.shape[-1] ) # Safety check
__lowerCAmelCase , __lowerCAmelCase = lax.top_k(__a , __a )
__lowerCAmelCase = jnp.broadcast_to((jnp.arange(__a ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
__lowerCAmelCase = topk_scores.flatten()
__lowerCAmelCase = topk_indices.flatten() + shift
__lowerCAmelCase = next_scores_flat.at[topk_indices_flat].set(__a )
__lowerCAmelCase = next_scores_flat.reshape(__a , __a )
return next_scores
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , __a ):
__lowerCAmelCase = bos_token_id
def __call__( self , __a , __a , __a ):
__lowerCAmelCase = jnp.full(scores.shape , -float("inf" ) )
__lowerCAmelCase = 1 - jnp.bool_(cur_len - 1 )
__lowerCAmelCase = jnp.where(__a , new_scores.at[:, self.bos_token_id].set(0 ) , __a )
return scores
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , __a , __a ):
__lowerCAmelCase = max_length
__lowerCAmelCase = eos_token_id
def __call__( self , __a , __a , __a ):
__lowerCAmelCase = jnp.full(scores.shape , -float("inf" ) )
__lowerCAmelCase = 1 - jnp.bool_(cur_len - self.max_length + 1 )
__lowerCAmelCase = jnp.where(__a , new_scores.at[:, self.eos_token_id].set(0 ) , __a )
return scores
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , __a , __a ):
if not isinstance(__a , __a ) or min_length < 0:
raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}" )
if not isinstance(__a , __a ) or eos_token_id < 0:
raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}" )
__lowerCAmelCase = min_length
__lowerCAmelCase = eos_token_id
def __call__( self , __a , __a , __a ):
# create boolean flag to decide if min length penalty should be applied
__lowerCAmelCase = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
__lowerCAmelCase = jnp.where(__a , scores.at[:, self.eos_token_id].set(-float("inf" ) ) , __a )
return scores
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , __a , __a ):
__lowerCAmelCase = list(__a )
__lowerCAmelCase = begin_index
def __call__( self , __a , __a , __a ):
__lowerCAmelCase = 1 - jnp.bool_(cur_len - self.begin_index )
__lowerCAmelCase = jnp.where(__a , scores.at[:, self.begin_suppress_tokens].set(-float("inf" ) ) , __a )
return scores
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , __a ):
__lowerCAmelCase = list(__a )
def __call__( self , __a , __a , __a ):
__lowerCAmelCase = scores.at[..., self.suppress_tokens].set(-float("inf" ) )
return scores
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , __a ):
__lowerCAmelCase = dict(__a )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
__lowerCAmelCase = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
__lowerCAmelCase = force_token_array.at[index].set(__a )
__lowerCAmelCase = jnp.intaa(__a )
def __call__( self , __a , __a , __a ):
def _force_token(__a ):
__lowerCAmelCase = scores.shape[0]
__lowerCAmelCase = self.force_token_array[generation_idx]
__lowerCAmelCase = jnp.ones_like(__a , dtype=scores.dtype ) * -float("inf" )
__lowerCAmelCase = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
__lowerCAmelCase = lax.dynamic_update_slice(__a , __a , (0, current_token) )
return new_scores
__lowerCAmelCase = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(__a ) , lambda: scores , ) , )
return scores
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , __a , __a , __a ):
__lowerCAmelCase = generate_config.eos_token_id
__lowerCAmelCase = generate_config.no_timestamps_token_id
__lowerCAmelCase = generate_config.no_timestamps_token_id + 1
__lowerCAmelCase = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(__a , "max_initial_timestamp_index" ):
__lowerCAmelCase = generate_config.max_initial_timestamp_index
else:
__lowerCAmelCase = model_config.vocab_size
if self.max_initial_timestamp_index is None:
__lowerCAmelCase = model_config.vocab_size
def __call__( self , __a , __a , __a ):
# suppress <|notimestamps|> which is handled by without_timestamps
__lowerCAmelCase = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) )
def handle_pairs(__a , __a ):
__lowerCAmelCase = jnp.where((cur_len - self.begin_index) >= 1 , __a , __a )
__lowerCAmelCase = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , __a , )
__lowerCAmelCase = jnp.where((cur_len - self.begin_index) < 2 , __a , __a )
__lowerCAmelCase = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , __a , __a , )
return jnp.where(
__a , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf" ) ) , scores_k.at[: self.eos_token_id].set(-float("inf" ) ) , ) , __a , )
__lowerCAmelCase = jax.vmap(__a )(__a , __a )
__lowerCAmelCase = jnp.where(cur_len == self.begin_index , __a , __a )
__lowerCAmelCase = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , __a , )
__lowerCAmelCase = self.timestamp_begin + self.max_initial_timestamp_index
__lowerCAmelCase = jnp.where(
__a , scores.at[:, last_allowed + 1 :].set(-float("inf" ) ) , __a , )
# if sum of probability over timestamps is above any other token, sample timestamp
__lowerCAmelCase = jax.nn.log_softmax(__a , axis=-1 )
def handle_cumulative_probs(__a , __a ):
__lowerCAmelCase = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
__lowerCAmelCase = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf" ) ) , __a , )
__lowerCAmelCase = jax.vmap(__a )(__a , __a )
return scores
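
# --- Editor's usage sketch (shapes assumed; not part of the original module):
#
#     processors = FlaxLogitsProcessorList(
#         [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=50)]
#     )
#     scores = processors(input_ids, scores, cur_len)
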
from collections.abc import Callable

import numpy as np


def explicit_euler(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Solve an ODE y' = ode_func(x, y) with the forward (explicit) Euler method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y
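
# --- Editor's demo sketch (assumes the signature above): for y' = y with
# y(0) = 1 and step 0.01 up to x = 1, the final value is (1.01 ** 100) ≈ 2.7048,
# an under-estimate of e ≈ 2.71828 that is typical of forward Euler.
#
#     y = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
#     print(y[-1])  # ~2.7048
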
if __name__ == "__main__":
import doctest
doctest.testmod()
import re
import string
from collections import Counter

import sacrebleu
import sacremoses
from packaging import version

import datasets


_CITATION = """
@inproceedings{xu-etal-2016-optimizing,
    title = {Optimizing Statistical Machine Translation for Text Simplification},
    authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
    journal = {Transactions of the Association for Computational Linguistics},
    volume = {4},
    year={2016},
    url = {https://www.aclweb.org/anthology/Q16-1029},
    pages = {401--415},
}

@inproceedings{post-2018-call,
    title = "A Call for Clarity in Reporting {BLEU} Scores",
    author = "Post, Matt",
    booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
    month = oct,
    year = "2018",
    address = "Belgium, Brussels",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/W18-6319",
    pages = "186--191",
}
"""
_DESCRIPTION = """\
WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU
It can be used to evaluate the quality of machine-generated texts.
"""

_KWARGS_DESCRIPTION = """
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
    sources: list of source sentences where each sentence should be a string.
    predictions: list of predicted sentences where each sentence should be a string.
    references: list of lists of reference sentences where each sentence should be a string.
Returns:
    sari: sari score
    sacrebleu: sacrebleu score
    exact: exact score
Examples:
    >>> sources=["About 95 species are currently accepted ."]
    >>> predictions=["About 95 you now get in ."]
    >>> references=[["About 95 species are currently known ."]]
    >>> wiki_split = datasets.load_metric("wiki_split")
    >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
    >>> print(results)
    {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}
"""
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []

    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence, lowercase=True, tokenizer="13a", return_str=True):
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though Wiki-Auto and TURK datasets,
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent


def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def A__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="exp" , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , ) -> Dict:
lowerCamelCase : Optional[int] =len(references[0] )
if any(len(SCREAMING_SNAKE_CASE_ ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
lowerCamelCase : Optional[int] =[[refs[i] for refs in references] for i in range(SCREAMING_SNAKE_CASE_ )]
lowerCamelCase : Union[str, Any] =sacrebleu.corpus_bleu(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , smooth_method=SCREAMING_SNAKE_CASE_ , smooth_value=SCREAMING_SNAKE_CASE_ , force=SCREAMING_SNAKE_CASE_ , lowercase=SCREAMING_SNAKE_CASE_ , use_effective_order=SCREAMING_SNAKE_CASE_ , )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
"""simple docstring"""
from typing import Any
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,):
"""simple docstring"""
_validation(
lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,)
# Creates data structures and fill initial step
_UpperCAmelCase = {}
_UpperCAmelCase = {}
for state in states_space:
_UpperCAmelCase = observations_space[0]
_UpperCAmelCase = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
_UpperCAmelCase = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 ,len(lowercase ) ):
_UpperCAmelCase = observations_space[o]
_UpperCAmelCase = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
_UpperCAmelCase = """"""
_UpperCAmelCase = -1
for k_state in states_space:
_UpperCAmelCase = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
_UpperCAmelCase = probability
_UpperCAmelCase = k_state
# Update probabilities and pointers dicts
_UpperCAmelCase = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
_UpperCAmelCase = arg_max
# The final observation
_UpperCAmelCase = observations_space[len(lowercase ) - 1]
# argmax for given final observation
_UpperCAmelCase = """"""
_UpperCAmelCase = -1
for k_state in states_space:
_UpperCAmelCase = probabilities[(k_state, final_observation)]
if probability > max_probability:
_UpperCAmelCase = probability
_UpperCAmelCase = k_state
_UpperCAmelCase = arg_max
# Process pointers backwards
_UpperCAmelCase = last_state
_UpperCAmelCase = []
for o in range(len(lowercase ) - 1 ,-1 ,-1 ):
result.append(lowercase )
_UpperCAmelCase = pointers[previous, observations_space[o]]
result.reverse()
return result
def _validation(
    observations_space,
    states_space,
    initial_probabilities,
    transition_probabilities,
    emission_probabilities,
):
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space,
    states_space,
    initial_probabilities,
    transition_probabilities,
    emission_probabilities,
):
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space, states_space):
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object, var_name):
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)


def _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities):
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object, var_name):
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object, var_name, value_type, nested=False):
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)
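
# --- Editor's usage sketch (the classic two-state "healthy/fever" HMM; the
# names and numbers below are illustrative, not part of the original module):
#
#     observations = ["normal", "cold", "dizzy"]
#     states = ["healthy", "fever"]
#     start_p = {"healthy": 0.6, "fever": 0.4}
#     trans_p = {
#         "healthy": {"healthy": 0.7, "fever": 0.3},
#         "fever": {"healthy": 0.4, "fever": 0.6},
#     }
#     emit_p = {
#         "healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
#         "fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
#     }
#     viterbi(observations, states, start_p, trans_p, emit_p)
#     # -> ["healthy", "healthy", "fever"]
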
if __name__ == "__main__":
from doctest import testmod
testmod()
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
UpperCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase__ = """
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")
>>> repo = \"openai/shap-e-img2img\"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"
>>> image = load_image(image_url).convert(\"RGB\")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")
```
"""
@dataclass
class ShapEPipelineOutput(BaseOutput):
    """Output class for the Shap-E pipelines."""

    images: Union[PIL.Image.Image, np.ndarray]
class ShapEImg2ImgPipeline(DiffusionPipeline):
    def __init__(
        self,
        prior: PriorTransformer,
        image_encoder: CLIPVisionModel,
        image_processor: CLIPImageProcessor,
        scheduler: HeunDiscreteScheduler,
        renderer: ShapERenderer,
    ):
        super().__init__()

        self.register_modules(
            prior=prior,
            image_encoder=image_encoder,
            image_processor=image_processor,
            scheduler=scheduler,
            renderer=renderer,
        )

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, List) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256

        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds
@torch.no_grad()
@replace_example_docstring(__lowerCAmelCase )
def __call__( self : Union[str, Any] , __lowerCAmelCase : Union[PIL.Image.Image, List[PIL.Image.Image]] , __lowerCAmelCase : int = 1 , __lowerCAmelCase : int = 25 , __lowerCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowerCAmelCase : Optional[torch.FloatTensor] = None , __lowerCAmelCase : float = 4.0 , __lowerCAmelCase : int = 64 , __lowerCAmelCase : Optional[str] = "pil" , __lowerCAmelCase : bool = True , ):
if isinstance(__lowerCAmelCase , PIL.Image.Image ):
_UpperCAmelCase = 1
elif isinstance(__lowerCAmelCase , torch.Tensor ):
_UpperCAmelCase = image.shape[0]
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
_UpperCAmelCase = len(__lowerCAmelCase )
else:
raise ValueError(
f'''`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(__lowerCAmelCase )}''' )
_UpperCAmelCase = self._execution_device
_UpperCAmelCase = batch_size * num_images_per_prompt
_UpperCAmelCase = guidance_scale > 1.0
_UpperCAmelCase = self._encode_image(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# prior
self.scheduler.set_timesteps(__lowerCAmelCase , device=__lowerCAmelCase )
_UpperCAmelCase = self.scheduler.timesteps
_UpperCAmelCase = self.prior.config.num_embeddings
_UpperCAmelCase = self.prior.config.embedding_dim
_UpperCAmelCase = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
_UpperCAmelCase = latents.reshape(latents.shape[0] , __lowerCAmelCase , __lowerCAmelCase )
for i, t in enumerate(self.progress_bar(__lowerCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
_UpperCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_UpperCAmelCase = self.scheduler.scale_model_input(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = self.prior(
__lowerCAmelCase , timestep=__lowerCAmelCase , proj_embedding=__lowerCAmelCase , ).predicted_image_embedding
# remove the variance
_UpperCAmelCase , _UpperCAmelCase = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance is not None:
_UpperCAmelCase , _UpperCAmelCase = noise_pred.chunk(2 )
_UpperCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
_UpperCAmelCase = self.scheduler.step(
__lowerCAmelCase , timestep=__lowerCAmelCase , sample=__lowerCAmelCase , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=__lowerCAmelCase )
_UpperCAmelCase = []
for i, latent in enumerate(__lowerCAmelCase ):
print()
_UpperCAmelCase = self.renderer.decode(
latent[None, :] , __lowerCAmelCase , size=__lowerCAmelCase , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(__lowerCAmelCase )
_UpperCAmelCase = torch.stack(__lowerCAmelCase )
if output_type not in ["np", "pil"]:
raise ValueError(f'''Only the output types `pil` and `np` are supported not output_type={output_type}''' )
_UpperCAmelCase = images.cpu().numpy()
if output_type == "pil":
_UpperCAmelCase = [self.numpy_to_pil(__lowerCAmelCase ) for image in images]
# Offload last model to CPU
if hasattr(self , """final_offload_hook""" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=__lowerCAmelCase )
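The `__call__` above matches the shape of diffusers' Shap-E image-to-image pipeline. A minimal usage sketch follows; the pipeline class, the `openai/shap-e-img2img` checkpoint, and the input image path are assumptions not stated in this snippet:

import torch
from diffusers import ShapEImg2ImgPipeline
from diffusers.utils import load_image

# Load the pipeline in half precision and move it to the GPU (assumed available).
pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img", torch_dtype=torch.float16).to("cuda")

image = load_image("corgi.png")  # placeholder input image
# Returns rendered frames of the generated 3D asset via ShapEPipelineOutput.images.
frames = pipe(image, guidance_scale=3.0, num_inference_steps=64, frame_size=64).images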
| 277 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}


class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor

        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
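A short sketch of instantiating the config above, assuming it mirrors the public `transformers.BitConfig` API (this file lives inside the transformers source tree, so the top-level import path is an assumption):

from transformers import BitConfig, BitModel

# Defaults reproduce the google/bit-50 architecture; any of the
# __init__ arguments above can be overridden.
config = BitConfig(layer_type="bottleneck", global_padding="SAME")
model = BitModel(config)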
| 707 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )

    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
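A usage sketch for the tool above. The image path is a placeholder, and calling the tool directly assumes the standard `PipelineTool.__call__` behavior, where the processor and model are set up lazily on first use:

from PIL import Image

tool = ImageQuestionAnsweringTool()
# encode -> forward -> decode runs under the hood; returns the answer string.
answer = tool(Image.open("photo.jpg"), "What color is the car?")  # placeholder inputs
print(answer)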
| 291 | 0 |
import string


def decrypt(message: str) -> None:
    # Brute-force a Caesar cipher: try every possible key and print each candidate.
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
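A non-interactive check of the brute-force loop above: "THIS IS A SECRET" shifted by 3 is "WKLV LV D VHFUHW", so the candidate printed for key #3 recovers the plaintext.

decrypt("WKLV LV D VHFUHW")
# ...
# Decryption using Key #3: THIS IS A SECRET
# ...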
| 557 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
SCREAMING_SNAKE_CASE_ = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def __snake_case ( _lowercase ):
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_lowercase )
def __snake_case ( _lowercase ):
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_terminal_summary_main
UpperCamelCase = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(_lowercase ,id=_lowercase )
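With this conftest in place, pytest gains the shared `--make-reports` option registered by `pytest_addoption_shared`. A sketch of invoking it programmatically, assuming it is run from the repository root so the conftest is picked up:

import pytest

# Equivalent to the CLI call: pytest --make-reports=tests_torch tests/
pytest.main(["tests/", "--make-reports=tests_torch"])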
| 34 | 0 |
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNext does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNext does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNext does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
| 720 |
from __future__ import annotations

RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next digit
        placement *= RADIX
    return list_of_ints


if __name__ == "__main__":
    import doctest

    doctest.testmod()
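A quick sanity check of `radix_sort` above (note that it sorts the list in place and also returns it):

print(radix_sort([170, 45, 75, 90, 802, 24, 2, 66]))
# [2, 24, 45, 66, 75, 90, 170, 802]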
| 578 | 0 |