from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    """Configuration class for a ViT MSN model."""

    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
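# Minimal usage sketch (not part of the original file; assumes the class is
# exposed at the top level of transformers as ViTMSNConfig):
#
#     from transformers import ViTMSNConfig, ViTMSNModel
#     config = ViTMSNConfig(hidden_size=384, num_hidden_layers=6)
#     model = ViTMSNModel(config)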
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    """Parse the launcher's own arguments plus everything meant for the training script."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
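# Example invocation (illustrative paths; `your_training_script.py` must define
# an `_mp_fn(index)` entry point for xmp.spawn to call in every process):
#
#     python xla_spawn.py --num_cores 8 your_training_script.py --arg1 --arg2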
from __future__ import annotations


def solve_maze(maze: list[list[int]]) -> bool:
    """Try to find a path from the top-left to the bottom-right cell and print it."""
    size = len(maze)
    # We need to create a solution object to save the path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """Depth-first search step: returns True if a path through (i, j) reaches the goal."""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already-visited and blocked points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # mark as visited
            solutions[i][j] = 1

            # check all four directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
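    # Hedged usage sketch (demo grid invented for illustration): 0 marks an
    # open cell and 1 a wall; the solver prints the visited path and returns True.
    demo_maze = [
        [0, 1, 0],
        [0, 0, 0],
        [1, 0, 0],
    ]
    solve_maze(demo_maze)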
import warnings
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class FlavaProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None,
        return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images,
                return_image_mask=return_image_mask,
                return_codebook_pixels=return_codebook_pixels,
                return_tensors=return_tensors,
                **kwargs,
            )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
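# Minimal usage sketch (not part of the original file; model id and inputs are
# illustrative):
#
#     from transformers import FlavaProcessor
#     processor = FlavaProcessor.from_pretrained("facebook/flava-full")
#     batch = processor(images=image, text=["a photo of a cat"], return_tensors="pt")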
"""simple docstring"""
from __future__ import annotations
def _SCREAMING_SNAKE_CASE ( _lowercase : list[int] , _lowercase : list[int] , _lowercase : list[int] , _lowercase : list[list[str]] , _lowercase : int , ) ->None:
'''simple docstring'''
a : Optional[int] = len(_lowercase )
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
for col in range(_lowercase ):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
# If any or these are True it means there is a collision so we continue to the
# next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
depth_first_search(
[*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , _lowercase , _lowercase , )
def _SCREAMING_SNAKE_CASE ( _lowercase : int ) ->None:
'''simple docstring'''
a : list[list[str]] = []
depth_first_search([] , [] , [] , _lowercase , _lowercase )
# Print all the boards
for board in boards:
for column in board:
print(_lowercase )
print("" )
print(len(_lowercase ) , "solutions were found." )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
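    # For n = 4 the search above finds exactly the two boards encoded by
    # [1, 3, 0, 2] and [2, 0, 3, 1], so the call prints both and then
    # "2 solutions were found."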
"""simple docstring"""
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("socket.socket" )
@patch("builtins.open" )
def _SCREAMING_SNAKE_CASE ( _lowercase : List[Any] , _lowercase : int ) ->str:
'''simple docstring'''
a : Optional[Any] = Mock()
a : Dict = conn, Mock()
a : Union[str, Any] = iter([1, None] )
a : Optional[int] = lambda _lowercase : next(_lowercase )
# ===== invoke =====
send_file(filename="mytext.txt" , testing=_lowercase )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
import logging

from transformers import PretrainedConfig


logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}


class BertAbsConfig(PretrainedConfig):
    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30_522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2_048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
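# Minimal usage sketch (illustrative overrides; this config appears to come from
# a standalone summarization example rather than the core transformers library):
#
#     config = BertAbsConfig(dec_layers=4, dec_hidden_size=512)
#     print(config.dec_layers)  # 4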
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path

import pytest

import transformers
from transformers import (
    BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    AutoTokenizer,
    BertConfig,
    BertTokenizer,
    BertTokenizerFast,
    CTRLTokenizer,
    GPT2Tokenizer,
    GPT2TokenizerFast,
    PreTrainedTokenizerFast,
    RobertaTokenizer,
    RobertaTokenizerFast,
    is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
    TOKENIZER_MAPPING,
    get_tokenizer_config,
    tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
    DUMMY_DIFF_TOKENIZER_IDENTIFIER,
    DUMMY_UNKNOWN_IDENTIFIER,
    SMALL_MODEL_IDENTIFIER,
    RequestCounter,
    require_tokenizers,
    slow,
)


sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_tokenization import CustomTokenizer  # noqa E402


if is_tokenizers_available():
    from test_module.custom_tokenization_fast import CustomTokenizerFast


class AutoTokenizerTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    @slow
    def test_tokenizer_from_pretrained(self):
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
            self.assertGreater(len(tokenizer), 0)

        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (GPT2Tokenizer, GPT2TokenizerFast))
            self.assertGreater(len(tokenizer), 0)

    def test_tokenizer_from_pretrained_identifier(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)

    def test_tokenizer_from_model_type(self):
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 20)

    def test_tokenizer_from_tokenizer_class(self):
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER, config=config)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)

    def test_tokenizer_from_type(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert", use_fast=False)
            self.assertIsInstance(tokenizer, BertTokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2", use_fast=False)
            self.assertIsInstance(tokenizer, GPT2Tokenizer)

    @require_tokenizers
    def test_tokenizer_from_type_fast(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert")
            self.assertIsInstance(tokenizer, BertTokenizerFast)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2")
            self.assertIsInstance(tokenizer, GPT2TokenizerFast)

    def test_tokenizer_from_type_incorrect_name(self):
        with pytest.raises(ValueError):
            AutoTokenizer.from_pretrained("./", tokenizer_type="xxx")

    @require_tokenizers
    def test_tokenizer_identifier_with_correct_config(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            tokenizer = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased")
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))

            if isinstance(tokenizer, BertTokenizer):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case, False)
            else:
                self.assertEqual(tokenizer.do_lower_case, False)

            self.assertEqual(tokenizer.model_max_length, 512)

    @require_tokenizers
    def test_tokenizer_identifier_non_existent(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                EnvironmentError,
                "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier",
            ):
                _ = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists")

    def test_model_name_edge_cases_in_mappings(self):
        tokenizers = TOKENIZER_MAPPING.values()
        tokenizer_names = []

        for slow_tok, fast_tok in tokenizers:
            if slow_tok is not None:
                tokenizer_names.append(slow_tok.__name__)

            if fast_tok is not None:
                tokenizer_names.append(fast_tok.__name__)

        for tokenizer_name in tokenizer_names:
            # must find the right class
            tokenizer_class_from_name(tokenizer_name)

    @require_tokenizers
    def test_from_pretrained_use_fast_toggle(self):
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased", use_fast=False), BertTokenizer)
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased"), BertTokenizerFast)

    @require_tokenizers
    def test_do_lower_case(self):
        tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased", do_lower_case=False)
        sample = "Hello, world. How are you?"
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

        tokenizer = AutoTokenizer.from_pretrained("microsoft/mpnet-base", do_lower_case=False)
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

    @require_tokenizers
    def test_PreTrainedTokenizerFast_from_pretrained(self):
        tokenizer = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config")
        self.assertEqual(type(tokenizer), PreTrainedTokenizerFast)
        self.assertEqual(tokenizer.model_max_length, 512)
        self.assertEqual(tokenizer.vocab_size, 30_000)
        self.assertEqual(tokenizer.unk_token, "[UNK]")
        self.assertEqual(tokenizer.padding_side, "right")
        self.assertEqual(tokenizer.truncation_side, "right")

    def test_auto_tokenizer_from_local_folder(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            tokenizer2 = AutoTokenizer.from_pretrained(tmp_dir)

        self.assertIsInstance(tokenizer2, tokenizer.__class__)
        self.assertEqual(tokenizer2.vocab_size, 12)

    def test_auto_tokenizer_fast_no_slow(self):
        tokenizer = AutoTokenizer.from_pretrained("ctrl")
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(tokenizer, CTRLTokenizer)

    def test_get_tokenizer_config(self):
        config = get_tokenizer_config("bert-base-cased")
        _ = config.pop("_commit_hash", None)
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(config, {"do_lower_case": False})

        # This model does not have a tokenizer_config so we get back an empty dict.
        config = get_tokenizer_config(SMALL_MODEL_IDENTIFIER)
        self.assertDictEqual(config, {})

        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            config = get_tokenizer_config(tmp_dir)

        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config["tokenizer_class"], "BertTokenizer")

    def test_new_tokenizer_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)

            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)

            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, slow_tokenizer_class=BertTokenizer)

            tokenizer = CustomTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    @require_tokenizers
    def test_new_tokenizer_fast_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)

            # Can register in two steps
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, None))
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=CustomTokenizerFast)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            del TOKENIZER_MAPPING._extra_content[CustomConfig]
            # Can register in one step
            AutoTokenizer.register(
                CustomConfig, slow_tokenizer_class=CustomTokenizer, fast_tokenizer_class=CustomTokenizerFast
            )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, fast_tokenizer_class=BertTokenizerFast)

            # We pass through a bert tokenizer fast because there is no converter from slow to fast for our new
            # tokenizer and that model does not have a tokenizer.json
            with tempfile.TemporaryDirectory() as tmp_dir:
                bert_tokenizer = BertTokenizerFast.from_pretrained(SMALL_MODEL_IDENTIFIER)
                bert_tokenizer.save_pretrained(tmp_dir)
                tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizerFast)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, use_fast=False)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_tokenizer(self):
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True)
        self.assertTrue(tokenizer.special_attribute_present)
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertTrue(reloaded_tokenizer.special_attribute_present)

        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            # Test tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)
                reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True, use_fast=False)
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(reloaded_tokenizer.special_attribute_present)
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")

    @require_tokenizers
    def test_from_pretrained_dynamic_tokenizer_conflict(self):
        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewTokenizerFast(BertTokenizerFast):
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=NewTokenizerFast)
            # If remote code is not set, the default is to use local
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", use_fast=False)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local one.
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertTrue(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(tokenizer.special_attribute_present)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_tokenizer_legacy_format(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True
        )
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoTokenizer.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_cached_tokenizer_has_minimum_calls_to_head(self):
        # Make sure we have cached the tokenizer.
        _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            self.assertEqual(counter.get_request_count, 0)
            self.assertEqual(counter.head_request_count, 1)
            self.assertEqual(counter.other_request_count, 0)
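# To run this suite alone (path is illustrative, mirroring the transformers
# repo layout):
#
#     python -m pytest tests/models/auto/test_tokenization_auto.py -q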
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]

if TYPE_CHECKING:
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
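# Design note: with this pattern, importing the package itself is cheap;
# submodules are only loaded the first time an attribute such as `MgpstrConfig`
# is accessed through the _LazyModule. When torch is missing, the modeling
# names are simply never registered in `_import_structure`, so importing them
# fails early instead of failing deep inside torch-dependent code.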
from typing import Optional

from torch import nn

from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
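# The forward pass above is a residual mix of the two transformers' deltas.
# A sketch of the math, using the attribute names defined in __init__ and
# writing T[k] for self.transformers[k] and idx for
# transformer_index_for_condition:
#
#     d0 = T[idx[0]](x, cond[:, :77]) - x
#     d1 = T[idx[1]](x, cond[:, 77:77 + 257]) - x
#     out = mix_ratio * d0 + (1 - mix_ratio) * d1 + x
#
# i.e. each transformer contributes only its *change* over the shared input,
# and the blended change is added back through the skip connection.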
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : int = logging.get_logger(__name__)
_lowerCamelCase : Union[str, Any] = {
'''microsoft/unispeech-sat-base-100h-libri-ft''': (
'''https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'''
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class lowercase ( a ):
lowercase__ : Tuple = """unispeech-sat"""
def __init__( self : str , _UpperCamelCase : Tuple=32 , _UpperCamelCase : Union[str, Any]=768 , _UpperCamelCase : Tuple=12 , _UpperCamelCase : List[str]=12 , _UpperCamelCase : Tuple=3_072 , _UpperCamelCase : List[str]="gelu" , _UpperCamelCase : Tuple=0.1 , _UpperCamelCase : Any=0.1 , _UpperCamelCase : Union[str, Any]=0.1 , _UpperCamelCase : Dict=0.0 , _UpperCamelCase : Tuple=0.0 , _UpperCamelCase : Tuple=0.1 , _UpperCamelCase : Optional[Any]=0.1 , _UpperCamelCase : Tuple=0.0_2 , _UpperCamelCase : Optional[int]=1e-5 , _UpperCamelCase : Union[str, Any]="group" , _UpperCamelCase : Optional[int]="gelu" , _UpperCamelCase : Tuple=(512, 512, 512, 512, 512, 512, 512) , _UpperCamelCase : List[str]=(5, 2, 2, 2, 2, 2, 2) , _UpperCamelCase : Optional[int]=(10, 3, 3, 3, 3, 2, 2) , _UpperCamelCase : Optional[int]=False , _UpperCamelCase : Dict=128 , _UpperCamelCase : Optional[int]=16 , _UpperCamelCase : Tuple=False , _UpperCamelCase : Union[str, Any]=True , _UpperCamelCase : Optional[Any]=0.0_5 , _UpperCamelCase : Union[str, Any]=10 , _UpperCamelCase : Union[str, Any]=2 , _UpperCamelCase : str=0.0 , _UpperCamelCase : List[Any]=10 , _UpperCamelCase : Optional[int]=0 , _UpperCamelCase : Any=320 , _UpperCamelCase : List[Any]=2 , _UpperCamelCase : str=0.1 , _UpperCamelCase : str=100 , _UpperCamelCase : int=256 , _UpperCamelCase : Optional[Any]=256 , _UpperCamelCase : List[Any]=0.1 , _UpperCamelCase : str="mean" , _UpperCamelCase : int=False , _UpperCamelCase : Optional[Any]=False , _UpperCamelCase : Any=256 , _UpperCamelCase : str=(512, 512, 512, 512, 1_500) , _UpperCamelCase : List[Any]=(5, 3, 3, 1, 1) , _UpperCamelCase : Union[str, Any]=(1, 2, 3, 1, 1) , _UpperCamelCase : Any=512 , _UpperCamelCase : str=0 , _UpperCamelCase : int=1 , _UpperCamelCase : Any=2 , _UpperCamelCase : Optional[Any]=504 , **_UpperCamelCase : str , ) -> int:
'''simple docstring'''
super().__init__(**_UpperCamelCase , pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase )
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = feat_extract_norm
SCREAMING_SNAKE_CASE = feat_extract_activation
SCREAMING_SNAKE_CASE = list(_UpperCamelCase )
SCREAMING_SNAKE_CASE = list(_UpperCamelCase )
SCREAMING_SNAKE_CASE = list(_UpperCamelCase )
SCREAMING_SNAKE_CASE = conv_bias
SCREAMING_SNAKE_CASE = num_conv_pos_embeddings
SCREAMING_SNAKE_CASE = num_conv_pos_embedding_groups
SCREAMING_SNAKE_CASE = len(self.conv_dim )
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = hidden_dropout
SCREAMING_SNAKE_CASE = attention_dropout
SCREAMING_SNAKE_CASE = activation_dropout
SCREAMING_SNAKE_CASE = feat_proj_dropout
SCREAMING_SNAKE_CASE = final_dropout
SCREAMING_SNAKE_CASE = layerdrop
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = num_clusters
SCREAMING_SNAKE_CASE = do_stable_layer_norm
SCREAMING_SNAKE_CASE = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
SCREAMING_SNAKE_CASE = apply_spec_augment
SCREAMING_SNAKE_CASE = mask_time_prob
SCREAMING_SNAKE_CASE = mask_time_length
SCREAMING_SNAKE_CASE = mask_time_min_masks
SCREAMING_SNAKE_CASE = mask_feature_prob
SCREAMING_SNAKE_CASE = mask_feature_length
SCREAMING_SNAKE_CASE = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
SCREAMING_SNAKE_CASE = num_codevectors_per_group
SCREAMING_SNAKE_CASE = num_codevector_groups
SCREAMING_SNAKE_CASE = contrastive_logits_temperature
SCREAMING_SNAKE_CASE = feat_quantizer_dropout
SCREAMING_SNAKE_CASE = num_negatives
SCREAMING_SNAKE_CASE = codevector_dim
SCREAMING_SNAKE_CASE = proj_codevector_dim
SCREAMING_SNAKE_CASE = diversity_loss_weight
# ctc loss
SCREAMING_SNAKE_CASE = ctc_loss_reduction
SCREAMING_SNAKE_CASE = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
SCREAMING_SNAKE_CASE = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
SCREAMING_SNAKE_CASE = list(_UpperCamelCase )
SCREAMING_SNAKE_CASE = list(_UpperCamelCase )
SCREAMING_SNAKE_CASE = list(_UpperCamelCase )
SCREAMING_SNAKE_CASE = xvector_output_dim
@property
def __snake_case( self : Tuple ) -> str:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
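# With the default conv_stride of (5, 2, 2, 2, 2, 2, 2), the property above
# evaluates to 5 * 2**6 = 320: the convolutional feature encoder emits one
# frame per 320 raw audio samples, i.e. every 20 ms at 16 kHz.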
import unittest

from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow


if is_flax_available():
    import jax.numpy as jnp

    from transformers import FlaxXLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])

        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )

        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of the last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="""%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s""",
datefmt="""%Y-%m-%d %H:%M:%S""",
level=os.environ.get("""LOGLEVEL""", """INFO""").upper(),
stream=sys.stdout,
)
_A = logging.getLogger(__name__)
_A = {"""facebook/bart-base""": BartForConditionalGeneration}
_A = {"""facebook/bart-base""": BartTokenizer}
def lowercase_ ( ) -> Tuple:
lowerCAmelCase__ : Optional[Any] = argparse.ArgumentParser(description="""Export Bart model + Beam Search to ONNX graph.""" )
parser.add_argument(
"""--validation_file""" , type=__UpperCAmelCase , default=__UpperCAmelCase , help="""A csv or a json file containing the validation data.""" )
parser.add_argument(
"""--max_length""" , type=__UpperCAmelCase , default=5 , help="""The maximum total input sequence length after tokenization.""" , )
parser.add_argument(
"""--num_beams""" , type=__UpperCAmelCase , default=__UpperCAmelCase , help=(
"""Number of beams to use for evaluation. This argument will be """
"""passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."""
) , )
parser.add_argument(
"""--model_name_or_path""" , type=__UpperCAmelCase , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=__UpperCAmelCase , )
parser.add_argument(
"""--config_name""" , type=__UpperCAmelCase , default=__UpperCAmelCase , help="""Pretrained config name or path if not the same as model_name""" , )
parser.add_argument(
"""--device""" , type=__UpperCAmelCase , default="""cpu""" , help="""Device where the model will be run""" , )
parser.add_argument("""--output_file_path""" , type=__UpperCAmelCase , default=__UpperCAmelCase , help="""Where to store the final ONNX file.""" )
lowerCAmelCase__ : List[str] = parser.parse_args()
return args
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase="cpu" ) -> Union[str, Any]:
lowerCAmelCase__ : str = model_dict[model_name].from_pretrained(__UpperCAmelCase ).to(__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = tokenizer_dict[model_name].from_pretrained(__UpperCAmelCase )
if model_name in ["facebook/bart-base"]:
lowerCAmelCase__ : int = 0
lowerCAmelCase__ : List[str] = None
lowerCAmelCase__ : int = 0
return huggingface_model, tokenizer
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Any:
model.eval()
lowerCAmelCase__ : str = None
lowerCAmelCase__ : Optional[int] = torch.jit.script(BARTBeamSearchGenerator(__UpperCAmelCase ) )
with torch.no_grad():
lowerCAmelCase__ : Tuple = """My friends are cool but they eat too many carbs."""
lowerCAmelCase__ : Dict = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1024 , return_tensors="""pt""" ).to(model.device )
lowerCAmelCase__ : str = model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , num_beams=__UpperCAmelCase , max_length=__UpperCAmelCase , early_stopping=__UpperCAmelCase , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
__UpperCAmelCase , (
inputs["""input_ids"""],
inputs["""attention_mask"""],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , __UpperCAmelCase , opset_version=14 , input_names=["""input_ids""", """attention_mask""", """num_beams""", """max_length""", """decoder_start_token_id"""] , output_names=["""output_ids"""] , dynamic_axes={
"""input_ids""": {0: """batch""", 1: """seq"""},
"""output_ids""": {0: """batch""", 1: """seq_out"""},
} , example_outputs=__UpperCAmelCase , )
logger.info("""Model exported to {}""".format(__UpperCAmelCase ) )
lowerCAmelCase__ : List[str] = remove_dup_initializers(os.path.abspath(__UpperCAmelCase ) )
logger.info("""Deduplicated and optimized model written to {}""".format(__UpperCAmelCase ) )
lowerCAmelCase__ : Any = onnxruntime.InferenceSession(__UpperCAmelCase )
lowerCAmelCase__ : Tuple = ort_sess.run(
__UpperCAmelCase , {
"""input_ids""": inputs["""input_ids"""].cpu().numpy(),
"""attention_mask""": inputs["""attention_mask"""].cpu().numpy(),
"""num_beams""": np.array(__UpperCAmelCase ),
"""max_length""": np.array(__UpperCAmelCase ),
"""decoder_start_token_id""": np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
logger.info("""Model outputs from torch and ONNX Runtime are similar.""" )
logger.info("""Success.""" )
def lowercase_ ( ) -> Union[str, Any]:
lowerCAmelCase__ : int = parse_args()
lowerCAmelCase__ : Dict = 5
lowerCAmelCase__ : Tuple = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
lowerCAmelCase__ : Tuple = torch.device(args.device )
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = load_model_tokenizer(args.model_name_or_path , __UpperCAmelCase )
if model.config.decoder_start_token_id is None:
raise ValueError("""Make sure that `config.decoder_start_token_id` is correctly defined""" )
model.to(__UpperCAmelCase )
if args.max_length:
lowerCAmelCase__ : Optional[int] = args.max_length
if args.num_beams:
lowerCAmelCase__ : Optional[int] = args.num_beams
if args.output_file_path:
lowerCAmelCase__ : Union[str, Any] = args.output_file_path
else:
lowerCAmelCase__ : Union[str, Any] = """BART.onnx"""
logger.info("""Exporting model to ONNX""" )
export_and_validate_model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if __name__ == "__main__":
main()
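# Example invocation (illustrative; assumes this script is saved as
# run_onnx_exporter.py and the bart_onnx helpers are importable):
#
#     python run_onnx_exporter.py --model_name_or_path facebook/bart-base \
#         --num_beams 4 --max_length 5 --output_file_path bart.onnx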
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
"""facebook/levit-128S""": """https://huggingface.co/facebook/levit-128S/resolve/main/config.json""",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class _lowerCamelCase ( a_ ):
_lowerCamelCase :Optional[Any] = "levit"
def __init__( self : List[Any] , UpperCamelCase : List[str]=2_24 , UpperCamelCase : Any=3 , UpperCamelCase : Optional[Any]=3 , UpperCamelCase : Union[str, Any]=2 , UpperCamelCase : Any=1 , UpperCamelCase : int=16 , UpperCamelCase : List[str]=[1_28, 2_56, 3_84] , UpperCamelCase : Optional[Any]=[4, 8, 12] , UpperCamelCase : Optional[int]=[4, 4, 4] , UpperCamelCase : str=[16, 16, 16] , UpperCamelCase : Tuple=0 , UpperCamelCase : List[str]=[2, 2, 2] , UpperCamelCase : Optional[int]=[2, 2, 2] , UpperCamelCase : Optional[int]=0.02 , **UpperCamelCase : Dict , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**UpperCamelCase )
lowerCAmelCase__ : int = image_size
lowerCAmelCase__ : Any = num_channels
lowerCAmelCase__ : int = kernel_size
lowerCAmelCase__ : Any = stride
lowerCAmelCase__ : List[str] = padding
lowerCAmelCase__ : Tuple = hidden_sizes
lowerCAmelCase__ : str = num_attention_heads
lowerCAmelCase__ : List[Any] = depths
lowerCAmelCase__ : List[str] = key_dim
lowerCAmelCase__ : List[str] = drop_path_rate
lowerCAmelCase__ : List[Any] = patch_size
lowerCAmelCase__ : Dict = attention_ratio
lowerCAmelCase__ : Tuple = mlp_ratio
lowerCAmelCase__ : Any = initializer_range
lowerCAmelCase__ : Dict = [
["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class _lowerCamelCase ( a_ ):
_lowerCamelCase :Tuple = version.parse("1.11" )
@property
def _lowerCAmelCase ( self : int ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _lowerCAmelCase ( self : List[str] ) -> float:
"""simple docstring"""
return 1E-4
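# Design note: each "Subsample" entry in down_ops describes the shrinking
# attention block between two stages. With the defaults,
# hidden_sizes[0] // key_dim[0] = 128 // 16 = 8 and
# hidden_sizes[1] // key_dim[0] = 256 // 16 = 16 are the head counts used by
# the two downsampling attentions.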
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]


def roman_to_int(roman: str) -> int:
    """Convert a Roman numeral string to an integer."""
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        # A smaller value before a larger one is subtractive (e.g. "IV" = 4).
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """Convert an integer to a Roman numeral string."""
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
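    # Hedged round-trip check (example value chosen for illustration; not part
    # of the original file):
    assert roman_to_int("MCMXCIV") == 1994
    assert int_to_roman(1994) == "MCMXCIV"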
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_char_small": (
            "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_chinese_char_base": (
            "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_discriminator": (
            "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_generator": (
            "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "junnyu/roformer_chinese_small": 1536,
    "junnyu/roformer_chinese_base": 1536,
    "junnyu/roformer_chinese_char_small": 512,
    "junnyu/roformer_chinese_char_base": 512,
    "junnyu/roformer_small_discriminator": 128,
    "junnyu/roformer_small_generator": 128,
}

PRETRAINED_INIT_CONFIGURATION = {
    "junnyu/roformer_chinese_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_base": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_base": {"do_lower_case": True},
    "junnyu/roformer_small_discriminator": {"do_lower_case": True},
    "junnyu/roformer_small_generator": {"do_lower_case": True},
}


class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        # The custom jieba pre-tokenizer cannot be pickled; swap in a standard one.
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(
        self,
        save_directory,
        legacy_format=None,
        filename_prefix=None,
        push_to_hub=False,
        **kwargs,
    ):
        # Saving cannot serialize the custom pre-tokenizer; use the standard one.
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
import argparse

import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline


parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_states = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_states)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}

if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
| 51 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)

DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
def interleave_datasets(datasets: List[DatasetType], probabilities: Optional[List[float]] = None, seed: Optional[int] = None, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted") -> DatasetType:
    """simple docstring"""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
def concatenate_datasets(dsets: List[DatasetType], info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, axis: int = 0) -> DatasetType:
    """simple docstring"""
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
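# A minimal usage sketch (illustrative, not part of this module); both entry points
# defined above are the public `datasets` API:
#
#     from datasets import Dataset
#
#     d1 = Dataset.from_dict({"a": [0, 1, 2]})
#     d2 = Dataset.from_dict({"a": [10, 11, 12]})
#     mixed = interleave_datasets([d1, d2], probabilities=[0.5, 0.5], seed=42)
#     combined = concatenate_datasets([d1, d2])  # 6 rows, d1 followed by d2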
| 51 | 1 |
def __lowerCamelCase ( lowerCamelCase__ = 1_000 ):
"""simple docstring"""
return sum(e for e in range(3 , lowerCamelCase__ ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(f'''{solution() = }''')
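# The same answer can also be computed in O(1) with inclusion-exclusion over
# arithmetic series; a small sketch (not part of the original solution):
def solution_closed_form(limit: int = 1_000) -> int:
    def sum_of_multiples(k: int) -> int:
        n = (limit - 1) // k  # number of multiples of k strictly below `limit`
        return k * n * (n + 1) // 2

    return sum_of_multiples(3) + sum_of_multiples(5) - sum_of_multiples(15)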
| 121 |
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """simple docstring"""
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    """simple docstring"""
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(check_str: str = "") -> None:
    """simple docstring"""
    print("\nFor string = ", check_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(check_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(check_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
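# Quick sanity checks (illustrative; both implementations agree):
#   can_string_be_rearranged_as_palindrome_counter("Momo")    -> True
#   can_string_be_rearranged_as_palindrome_counter("Mother")  -> False
#   can_string_be_rearranged_as_palindrome("nurses run")      -> True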
| 121 | 1 |
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]

    def __init__(self, spectrogram_length=2048, num_channels=1, patch_size=[16, 16], feature_size=128, sampling_rate=44100, hop_length_to_sampling_rate=86, n_fft=2048, padding_value=0.0, **kwargs):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=22050.0, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney").T

    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        log_spec = spectrogram(waveform, window_function(self.n_fft, "hann"), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel="dB", db_range=80.0)
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], return_tensors: Optional[Union[str, TensorType]] = None, return_attention_mask: Optional[bool] = True, sampling_rate: Optional[int] = None, resample: bool = False, mask_audio: bool = False, **kwargs) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
| 89 |
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    # Random weight
    weight = float(2 * (random.randint(1, 1_00)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 1_00) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 1_00


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
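# Example (illustrative; the output is stochastic): forward_propagation(32, 450_000)
# should converge to a value close to 32, e.g. somewhere around 31-33.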
| 299 | 0 |
"""simple docstring"""
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """simple docstring"""
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """simple docstring"""
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(big) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(big) < k - 1:
        return kth_number(small, k - len(big) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(big, k)
if __name__ == "__main__":
import doctest
doctest.testmod()
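# Illustrative examples (k counts from the largest element):
#   kth_number([2, 1, 3, 4, 5], 3) -> 3
#   kth_number([2, 1, 3, 4, 5], 1) -> 5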
| 350 |
"""simple docstring"""
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    """simple docstring"""
    try:
        _value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(_value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)


def skip(test_case):
    """simple docstring"""
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    """simple docstring"""
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    """simple docstring"""
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    """simple docstring"""
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    """simple docstring"""
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    """simple docstring"""
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    """simple docstring"""
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    """simple docstring"""
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    """simple docstring"""
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    """simple docstring"""
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    """simple docstring"""
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    """simple docstring"""
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    """simple docstring"""
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    """simple docstring"""
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    """simple docstring"""
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    """simple docstring"""
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    """simple docstring"""
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    """simple docstring"""
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    """simple docstring"""
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    """simple docstring"""
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    """simple docstring"""
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
class TempDirTestCase(unittest.TestCase):
    """simple docstring"""

    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        """simple docstring"""
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        """simple docstring"""
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        """simple docstring"""
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    """simple docstring"""

    def tearDown(self):
        """simple docstring"""
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    """simple docstring"""

    def add_mocks(self, mocks):
        """simple docstring"""
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    """simple docstring"""
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True


class _RunOutput:
    """simple docstring"""

    def __init__(self, returncode, stdout, stderr):
        """simple docstring"""
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    """simple docstring"""
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    """simple docstring"""
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    """simple docstring"""
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    return result


class SubprocessCallException(Exception):
    """simple docstring"""

    pass


def run_command(command, return_stdout=False):
    """simple docstring"""
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
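# A minimal usage sketch (illustrative, not part of this module): the async runner
# above is typically used to launch a test script and capture its output.
#
#     cmd = [sys.executable, "-m", "pytest", "-sv", "tests/test_example.py"]
#     result = execute_subprocess_async(cmd, env=os.environ.copy())
#     print("\n".join(result.stdout))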
| 149 | 0 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    pred = model.predict(x_test)
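    # A small follow-up sketch (an assumption, not part of the original script):
    # report the RMSE of the 5-day-ahead forecasts in normalised units.
    rmse = np.sqrt(np.mean((pred - y_test) ** 2))
    print(f"test RMSE (normalised): {rmse:.4f}")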
| 300 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = """It was the year of Our Lord one thousand seven hundred and
        seventy-five.\n\nSpiritual revelations were conceded to England at that
        favoured period, as at this."""
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns an empty collection of lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
| 206 | 0 |
"""simple docstring"""
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''

    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        '''simple docstring'''
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        '''simple docstring'''
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        '''simple docstring'''
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        '''simple docstring'''
        return ("This is a test", "This is a test")
    def test_convert_token_and_id(self):
        '''simple docstring'''
        token = "</s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        '''simple docstring'''
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)

    def test_mask_tokens_rust_pegasus(self):
        '''simple docstring'''
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    def test_large_mask_tokens(self):
        '''simple docstring'''
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)

    def test_large_tokenizer_settings(self):
        '''simple docstring'''
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def test_large_seq2seq_truncation(self):
        '''simple docstring'''
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
@slow
    def test_tokenizer_integration(self):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = {'''input_ids''': [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=SCREAMING_SNAKE_CASE_, model_name="google/bigbird-pegasus-large-arxiv", revision="ba85d0851d708441f91440d509690f1ab6353415"
        )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''

    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        '''simple docstring'''
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        '''simple docstring'''
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        '''simple docstring'''
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        '''simple docstring'''
        return ("This is a test", "This is a test")
    def test_mask_tokens_rust_pegasus(self):
        '''simple docstring'''
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    @require_torch
    def test_large_seq2seq_truncation(self):
        '''simple docstring'''
        src_texts = ["This is going to be way too long." * 1000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer(self):
        '''simple docstring'''
        test_str = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )

        token_ids = self._large_tokenizer(test_str).input_ids
        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
        )
| 318 |
"""simple docstring"""
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    """simple docstring"""
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)
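# Shape sketch (illustrative): for `emb_1` of shape (B1, D) and `emb_2` of shape
# (B2, D), each row is L2-normalised and the result is the (B1, B2) matrix of
# cosine similarities between the two sets of embeddings.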
class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    """simple docstring"""

    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        """simple docstring"""
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        """simple docstring"""
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nfsw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts


class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    """simple docstring"""

    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(self, config: CLIPConfig, input_shape: Optional[Tuple] = None, seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, **kwargs):
        """simple docstring"""
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        """simple docstring"""
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]

        return random_params

    def __call__(self, clip_input, params: dict = None):
        """simple docstring"""
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
| 318 | 1 |
import functools
def min_distance_up_bottom(worda: str, wordb: str) -> int:
    len_worda = len(worda)
    len_wordb = len(wordb)

    @functools.cache
    def min_distance(indexa: int, indexb: int) -> int:
        # if the first word index overflows - delete all remaining chars of the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if the second word index overflows - delete all remaining chars of the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb])  # current letters not identical
        return min(
            1 + min_distance(indexa + 1, indexb),
            1 + min_distance(indexa, indexb + 1),
            diff + min_distance(indexa + 1, indexb + 1),
        )

    return min_distance(0, 0)
return min_distance(0 , 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
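# Illustrative examples:
#   min_distance_up_bottom("intention", "execution") -> 5
#   min_distance_up_bottom("", "abc") -> 3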
| 20 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        '''simple docstring'''
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        '''simple docstring'''
        for example in examples:
            outputs = video_classifier(example)

            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        '''simple docstring'''
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )

        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        '''simple docstring'''
        pass
| 20 | 1 |
'''simple docstring'''
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class ValueGuidedRLPipeline(DiffusionPipeline):
    def __init__(self, value_function: UNet1DModel, unet: UNet1DModel, scheduler: DDPMScheduler, env):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in

    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]

                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)
            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)

            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y

    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)

        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)

        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)

        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")

        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)

        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
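# A rough usage sketch (assumes a D4RL-style `env` and matching pretrained
# checkpoints; the identifiers below are illustrative, not guaranteed):
#
#     pipeline = ValueGuidedRLPipeline.from_pretrained(
#         "bglick13/hopper-medium-v2-value-function-hor32", env=env
#     )
#     obs = env.reset()
#     action = pipeline(obs, planning_horizon=32)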
| 222 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_lxmert': ['LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LxmertConfig'],
    'tokenization_lxmert': ['LxmertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_lxmert_fast'] = ['LxmertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_lxmert'] = [
'LxmertEncoder',
'LxmertForPreTraining',
'LxmertForQuestionAnswering',
'LxmertModel',
'LxmertPreTrainedModel',
'LxmertVisualFeatureEncoder',
'LxmertXLayer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_lxmert'] = [
'TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLxmertForPreTraining',
'TFLxmertMainLayer',
'TFLxmertModel',
'TFLxmertPreTrainedModel',
'TFLxmertVisualFeatureEncoder',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 222 | 1 |
def is_sum_subset(arr: list, required_sum: int) -> bool:
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
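# Illustrative examples:
#   is_sum_subset([2, 4, 6, 8], 5)  -> False  (all elements are even)
#   is_sum_subset([2, 4, 6, 8], 14) -> True   (2 + 4 + 8)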
| 121 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {'configuration_swin': ['SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SwinConfig', 'SwinOnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_swin'] = [
'SWIN_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwinForImageClassification',
'SwinForMaskedImageModeling',
'SwinModel',
'SwinPreTrainedModel',
'SwinBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_swin'] = [
'TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSwinForImageClassification',
'TFSwinForMaskedImageModeling',
'TFSwinModel',
'TFSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 121 | 1 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    """Combines a LayoutLMv2 image processor and a LayoutXLM tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
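# Hedged usage sketch (microsoft/layoutxlm-base is a public checkpoint; `image`
# stands for any PIL image of a document page and is not defined here):
#   processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#   encoding = processor(image, return_tensors="pt")
#   # -> BatchEncoding with input_ids, bbox, attention_mask and image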
| 335 |
# flake8: noqa
# Lint as: python3
__all__ = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 335 | 1 |
'''simple docstring'''
def get_demo_graph(index: int) -> dict[int, list[int]]:
"""simple docstring"""
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    """Return the bridges of an undirected graph given as an adjacency dict."""
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
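# Usage sketch (hedged): on the first fixture the bridges are (2, 3), (3, 4) and
# (2, 5); the exact ordering depends on the DFS traversal.
if __name__ == "__main__":
    print(compute_bridges(get_demo_graph(0)))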
| 31 |
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class TFCvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
'''simple docstring'''
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_tf_common.py, as Cvt
    does not use input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFCvtModelTester(self)
        self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
'''simple docstring'''
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="Cvt does not output attentions" )
    def test_attention_outputs(self):
'''simple docstring'''
pass
@unittest.skip(reason="Cvt does not use inputs_embeds" )
    def test_inputs_embeds(self):
'''simple docstring'''
pass
@unittest.skip(reason="Cvt does not support input and output embeddings" )
    def test_model_common_attributes(self):
'''simple docstring'''
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
    def test_dataset_conversion(self):
'''simple docstring'''
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
    def test_keras_fit(self):
'''simple docstring'''
super().test_keras_fit()
@unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8" )
    def test_keras_fit_mixed_precision(self):
        policy = tf.keras.mixed_precision.Policy("mixed_float16")
        tf.keras.mixed_precision.set_global_policy(policy)
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("float32" )
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFCvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
    @slow
    def test_inference_image_classification_head(self):
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([0.9285, 0.9015, -0.3150])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
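# Hedged run sketch: as elsewhere in this suite, the @slow cases only execute
# when the RUN_SLOW environment variable is set, e.g.
#   RUN_SLOW=1 python -m pytest -k "cvt and integration" tests/
# (the exact test path is an assumption, not pinned here)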
| 149 | 0 |
"""simple docstring"""
class TrieNode:
    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        """Inserts a list of words into the Trie."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Inserts a word into the Trie."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        """Returns True if the exact word is stored in the Trie."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        """Deletes a word from the Trie, pruning branches left empty."""

        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")

    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())
if __name__ == "__main__":
main()
| 368 |
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--big_bird_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
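    # Hedged CLI sketch (file paths are placeholders for your own checkpoint/config):
    #   python <this_script>.py \
    #       --tf_checkpoint_path bigbird_model.ckpt \
    #       --big_bird_config_file config.json \
    #       --pytorch_dump_path ./pt_dump \
    #       --is_trivia_qa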
| 257 | 0 |
'''simple docstring'''
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)
if __name__ == "__main__":
main()
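# Round-trip sketch (hedged): decrypting an encryption with the same key returns
# the original message; case and non-letters pass through unchanged.
if __name__ == "__main__":
    assert decrypt_message("LION", encrypt_message("LION", "Attack at dawn!")) == "Attack at dawn!"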
| 318 |
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
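# Hedged note on the design: each list slot gets its own process; neighbors
# exchange values over Pipes guarded by a shared Lock, so the sort is driven by
# message passing rather than shared memory. Quick check (must stay under the
# __main__ guard so child processes can be spawned safely):
if __name__ == "__main__":
    assert odd_even_transposition(list(range(10, 0, -1))) == list(range(1, 11))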
| 318 | 1 |
"""simple docstring"""
def get_demo_graph(index):
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph):
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
| 350 |
"""simple docstring"""
print((lambda quine: quine % quine)("print((lambda quine: quine %% quine)(%r))"))
| 316 | 0 |
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
"The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
" body.",
"Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
" punishment.",
]
TGT = [
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
" had informed his Lufthansa training school of an episode of severe depression, airline says .",
"Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
" Israel and the United States opposed the move, which could open the door to war crimes investigations against"
" Israelis .",
"Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
" death . Organization claims that governments around the world are using the threat of terrorism to advance"
" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
" sentences up by 28% .",
]
def test_disaggregated_scores_are_determinstic() -> None:
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_ra = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_ra["rouge2"]).fmeasure.mean()
    )


def test_newline_cnn_improvement() -> None:
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep


def test_newline_irrelevant_for_other_metrics() -> None:
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep


def test_single_sent_scores_dont_depend_on_newline_sep() -> None:
    pred = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)


def test_pegasus_newline() -> None:
    pred = [
        '" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '
    ]
    tgt = [
        ' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score


def test_rouge_cli() -> None:
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
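# Hedged smoke-run sketch: these are plain pytest-style tests; with the local
# `utils.calculate_rouge` importable they can also be invoked directly.
if __name__ == "__main__":
    test_newline_cnn_improvement()
    test_single_sent_scores_dont_depend_on_newline_sep()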
| 222 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)

            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )

        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        pass
| 222 | 1 |
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]

    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)

    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}

    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    # Reverse our cipher mappings
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
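# Round-trip sketch (hedged): a keyword map is a bijection over A-Z, so decipher
# inverts encipher; output is uppercased by design.
if __name__ == "__main__":
    _demo_map = create_cipher_map("college")
    assert decipher(encipher("hello", _demo_map), _demo_map) == "HELLO"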
| 151 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
lowercase : List[str] = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
# optional parameters
parser.add_argument(
'--targets',
default='DeprecationWarning,UserWarning,FutureWarning',
type=list_str,
help='Comma-separated list of target warning(s) which we want to extract.',
)
parser.add_argument(
'--from_gh',
action='store_true',
help='If running from a GitHub action workflow and collecting warnings from its artifacts.',
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('=' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, 'selected_warnings.json'), 'w', encoding='UTF-8') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
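    # Hedged CLI sketch (run id and token are placeholders):
    #   python extract_warnings.py --workflow_run_id 1234567890 \
    #       --output_dir ./ci_artifacts --token $GH_TOKEN \
    #       --targets DeprecationWarning,UserWarning,FutureWarning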
| 151 | 1 |
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
"""simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
    def test_multi_gpu_data_parallel_forward(self):
"""simple docstring"""
pass
    def test_config(self):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
"""simple docstring"""
return
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
@unittest.skip("""Swin does not use inputs_embeds""" )
    def test_inputs_embeds(self):
"""simple docstring"""
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
    def test_feed_forward_chunking(self):
"""simple docstring"""
pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
    def test_attention_outputs(self):
"""simple docstring"""
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
pass
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
    def test_model_from_pretrained(self):
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
    def test_save_load_fast_init_from_base(self):
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
    def test_save_load_fast_init_to_base(self):
"""simple docstring"""
pass
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

                def recursive_check(tuple_object, dict_object):
                    if isinstance(tuple_object, (List, Tuple)):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif isinstance(tuple_object, Dict):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values(), dict_object.values()
                        ):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                            ),
                            msg=(
                                "Tuple and dict output are not equal. Difference:"
                                f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                                f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                                f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                            ),
                        )

                recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    # Overriding as returning all hidden_states is not supported by default here;
    # the stem layer is skipped and hidden states have a (batch, seq_len, channels) layout.
    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
| 335 |
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()
@add_start_docstrings(
    "RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
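

# Usage sketch (added commentary; tensor names are hypothetical and assume the
# deebert research-project layout that this module imports from):
#
#   config = RobertaConfig.from_pretrained("roberta-base", num_labels=2)
#   model = DeeRobertaForSequenceClassification(config)
#   loss, logits = model(input_ids, labels=labels)[:2]
#
# At inference time each highway exit can raise HighwayException once its
# prediction is confident enough, and `exit_layer` then records how many
# encoder layers were actually executed.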
| 335 | 1 |
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)


class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    """
    Handles arguments for zero-shot text classification by turning each possible label into an NLI
    premise/hypothesis pair.
    """

    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )

        if isinstance(sequences, str):
            sequences = [sequences]

        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])

        return sequence_pairs, sequences
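
    # For example (illustrative), calling the handler as
    #   handler("I love this film", ["positive", "negative"], "This example is {}.")
    # produces the premise/hypothesis pairs
    #   [["I love this film", "This example is positive."],
    #    ["I love this film", "This example is negative."]]
    # together with the normalized list of sequences.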
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    """
    NLI-based zero-shot classification pipeline using a sequence-classification model trained on NLI
    (natural language inference) tasks.
    """

    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
            )

    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1
    def _parse_and_tokenize(
        self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
    ):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                "`pad_token=eos_token`"
            )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs,
                add_special_tokens=add_special_tokens,
                return_tensors=return_tensors,
                padding=padding,
                truncation=truncation,
            )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs,
                    add_special_tokens=add_special_tokens,
                    return_tensors=return_tensors,
                    padding=padding,
                    truncation=TruncationStrategy.DO_NOT_TRUNCATE,
                )
            else:
                raise e

        return inputs
    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers."
            )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params
    def __call__(self, sequences: Union[str, List[str]], *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}")

        return super().__call__(sequences, **kwargs)

    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)

        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])

            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }
    def _forward(self, inputs):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)

        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs

    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))

        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)

        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
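

# Minimal usage sketch (added; not part of the original module — the checkpoint
# name is only an example of an NLI model):
#
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
#   classifier("Who are you voting for in 2020?", candidate_labels=["politics", "sports"])
#
# The result is a dict with "sequence", "labels" sorted best-first, and "scores".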
| 357 |
"""simple docstring"""
def nand_gate(input_1: int, input_2: int) -> int:
    """Calculate NAND of the two input values."""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
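
# Note (added): NAND is functionally complete. For instance
# NOT a == nand_gate(a, a), and
# AND(a, b) == nand_gate(nand_gate(a, b), nand_gate(a, b)),
# so any other gate can be built from the function above. The truth table
# printed by the block above is 1, 1, 1, 0.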
| 272 | 0 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
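

# Worked example (added commentary): '0010' and '0110' differ only in the second
# bit, so compare_string merges them into '0_10'; '0110' and '1101' differ in
# more than one position, so the comparison returns False.
#
#   >>> compare_string("0010", "0110")
#   '0_10'
#   >>> compare_string("0110", "1101")
#   False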
def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
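

# Example (added): is_for_table("0_1", "011", 1) is True because the two
# strings differ in exactly one position (the "_" wildcard), i.e. minterm 011
# is covered by the implicant 0_1.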
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 21 |
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params


logger = getLogger(__name__)

DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")


def run_generate(verbose=True):
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)

    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")

    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")

    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )

    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)

    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info

    if verbose:
        print(scores)

    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))

    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 257 | 0 |
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False


logger = logging.get_logger("transformers-cli/serving")


def serve_command_factory(args: Namespace):
    """
    Factory function used to instantiate the serving server from provided command line arguments.

    Returns: ServeCommand
    """
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)


class ServeModelInfoResult(BaseModel):
    """Expose model information."""

    infos: dict


class ServeTokenizeResult(BaseModel):
    """Tokenize result model."""

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    """DeTokenize result model."""

    text: str


class ServeForwardResult(BaseModel):
    """Forward result model."""

    output: Any
class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
        )
        serve_parser.add_argument(
            "--task", type=str, choices=get_supported_tasks(), help="The task to run the pipeline on"
        )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        serve_parser.set_defaults(func=serve_command_factory)

    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline

        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]". '
                "Or install FastAPI and uvicorn separately."
            )
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/", self.model_info, response_model=ServeModelInfoResult,
                        response_class=JSONResponse, methods=["GET"],
                    ),
                    APIRoute(
                        "/tokenize", self.tokenize, response_model=ServeTokenizeResult,
                        response_class=JSONResponse, methods=["POST"],
                    ),
                    APIRoute(
                        "/detokenize", self.detokenize, response_model=ServeDeTokenizeResult,
                        response_class=JSONResponse, methods=["POST"],
                    ),
                    APIRoute(
                        "/forward", self.forward, response_model=ServeForwardResult,
                        response_class=JSONResponse, methods=["POST"],
                    ),
                ],
                timeout=600,
            )

    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))
    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)

            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)

        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        # Check we don't have empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])

        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
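

# Example invocation (added; illustrative, assuming the transformers CLI entry
# point is installed):
#
#   transformers-cli serve --task sentiment-analysis --host 0.0.0.0 --port 8888
#
# which exposes GET / for model info plus POST /tokenize, /detokenize and /forward.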
| 213 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 213 | 1 |
def lucas_lehmer_test(p: int) -> bool:
    """Return True if the Mersenne number 2**p - 1 is prime."""
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
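

# Background (added): the test uses the Lucas-Lehmer recurrence s_0 = 4,
# s_i = (s_{i-1}**2 - 2) mod M_p, where M_p = 2**p - 1; M_p is prime exactly
# when s_{p-2} == 0. For example p = 7 gives M_7 = 127 (prime, so True below),
# while p = 11 gives M_11 = 2047 = 23 * 89 (composite, so False).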
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 184 |
"""simple docstring"""
speed_chart: dict[str, float] = {
"km/h": 1.0,
"m/s": 3.6,
"mph": 1.60_93_44,
"knot": 1.8_52,
}
speed_chart_inverse: dict[str, float] = {
"km/h": 1.0,
"m/s": 0.2_77_77_77_78,
"mph": 0.6_21_37_11_92,
"knot": 0.5_39_95_68_03,
}
def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)
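

# Quick sanity check (added): converting 100 km/h to m/s multiplies by 1.0 and
# then by 0.277777778, so convert_speed(100, "km/h", "m/s") returns 27.778.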
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316 | 0 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""

    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
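

# Usage sketch (added; the file path is illustrative):
#
#   from datasets import load_dataset
#   ds = load_dataset("parquet", data_files={"train": "data/train.parquet"})
#
# load_dataset routes through this builder: _split_generators resolves the data
# files, then _generate_tables streams record batches of `batch_size` rows each.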
| 366 |
'''simple docstring'''
def apply_table(inp, table):
    """Apply the given permutation table to the input bit string."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Circular left shift by one position."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
| 249 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
),
}
class DPRConfig(PretrainedConfig):
    """Configuration class for the DPR encoder and reader models."""

    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
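

# Minimal usage sketch (added; values are illustrative):
#
#   config = DPRConfig(projection_dim=128)
#   assert config.hidden_size == 768  # BERT-base style defaults
#
# The same configuration class is shared by the DPR context encoder, question
# encoder and reader models.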
| 151 |
'''simple docstring'''
import argparse
import os
import torch
from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
"sample_size": 32,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": 1000,
"block_out_channels": [32, 64],
"attention_head_dim": 8,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
IMAGENET_64_UNET_CONFIG = {
"sample_size": 64,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 3,
"num_class_embeds": 1000,
"block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
LSUN_256_UNET_CONFIG = {
"sample_size": 256,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": None,
"block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "default",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
CD_SCHEDULER_CONFIG = {
"num_train_timesteps": 40,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
"num_train_timesteps": 201,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
"num_train_timesteps": 151,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
def str2bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]

    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]

    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)

    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path: str, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
    )
    parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")

    args = parser.parse_args()
    args.class_cond = str2bool(args.class_cond)

    ckpt_name = os.path.basename(args.unet_path)
    print(f"Checkpoint: {ckpt_name}")

    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    if not args.class_cond:
        unet_config["num_class_embeds"] = None

    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)

    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)

    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)

    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
| 151 | 1 |
'''simple docstring'''
import torch
def main():
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")
if __name__ == "__main__":
main()
| 361 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
def _lowerCAmelCase ( self : List[Any] , lowerCAmelCase__ : pa.Table ) -> pa.Table:
"""simple docstring"""
if self.config.features is not None:
_UpperCAmelCase : Tuple = self.config.features.arrow_schema
if all(not require_storage_cast(lowerCAmelCase__ ) for feature in self.config.features.values() ):
# cheaper cast
_UpperCAmelCase : Any = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=lowerCAmelCase__ )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
_UpperCAmelCase : int = table_cast(lowerCAmelCase__ , lowerCAmelCase__ )
return pa_table
def _lowerCAmelCase ( self : Dict , lowerCAmelCase__ : Dict ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : int = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
_UpperCAmelCase : Optional[Any] = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(lowerCAmelCase__ ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCAmelCase__ ) ):
_UpperCAmelCase : Optional[Any] = pd.read_csv(lowerCAmelCase__ , iterator=lowerCAmelCase__ , dtype=lowerCAmelCase__ , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(lowerCAmelCase__ ):
_UpperCAmelCase : Optional[int] = pa.Table.from_pandas(lowerCAmelCase__ )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(lowerCAmelCase__ )
except ValueError as e:
logger.error(F"""Failed to read file '{file}' with error {type(lowerCAmelCase__ )}: {e}""" )
raise
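# --- Illustrative sketch added for clarity; not part of the original file ---
# The final method above streams a CSV through pandas in chunks
# (pd.read_csv with iterator=True) and converts each chunk into a pyarrow
# Table. The same pattern stand-alone; column names and chunksize are made up:
import io

import pandas as pd
import pyarrow as pa

csv_buffer = io.StringIO("a,b\n1,x\n2,y\n3,z\n")
chunk_rows = []
for df in pd.read_csv(csv_buffer, iterator=True, chunksize=2):
    chunk_rows.append(pa.Table.from_pandas(df).num_rows)
assert chunk_rows == [2, 1]  # three rows arrive as a chunk of 2 and a chunk of 1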
| 17 | 0 |
'''simple docstring'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
_lowerCamelCase : List[str] = HfApi()
_lowerCamelCase : Tuple = {}
# fmt: off
_lowerCamelCase : Optional[int] = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
_lowerCamelCase : Union[str, Any] = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
_lowerCamelCase : str = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
_lowerCamelCase : Dict = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
_lowerCamelCase : Any = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
_lowerCamelCase : Union[str, Any] = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
_lowerCamelCase : Optional[int] = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
_lowerCamelCase : Optional[Any] = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
_lowerCamelCase : int = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
_lowerCamelCase : Any = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
_lowerCamelCase : Tuple = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
_lowerCamelCase : List[Any] = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
_lowerCamelCase : Any = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
_lowerCamelCase : List[Any] = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
_lowerCamelCase : Union[str, Any] = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
_lowerCamelCase : Tuple = api.list_models(filter='diffusers')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
_lowerCamelCase : Dict = '/home/patrick/google_checkpoints/' + mod.modelId.split('/')[-1]
print(f"Started running {mod.modelId}!!!")
if mod.modelId.startswith('CompVis'):
_lowerCamelCase : List[str] = UNetaDModel.from_pretrained(local_checkpoint, subfolder='unet')
else:
_lowerCamelCase : Tuple = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
_lowerCamelCase : int = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
_lowerCamelCase : List[Any] = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
_lowerCamelCase : Union[str, Any] = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['_'.join('_'.join(mod.modelId.split('/')).split('-'))], atol=1e-3
)
print(f"{mod.modelId} has passed successfully!!!")
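# --- Illustrative sketch added for clarity; not part of the original file ---
# The loop above seeds both torch and random before sampling noise, so the
# output slice compared with torch.allclose is reproducible across runs:
import random

import torch

torch.manual_seed(0)
random.seed(0)
first = torch.randn(4)
torch.manual_seed(0)
random.seed(0)
second = torch.randn(4)
assert torch.equal(first, second)  # same seed -> identical noise -> comparable outputs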
| 258 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__lowercase = logging.get_logger(__name__)
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = ['''input_features''']
def __init__( self , __lowerCAmelCase=80 , __lowerCAmelCase=16000 , __lowerCAmelCase=160 , __lowerCAmelCase=30 , __lowerCAmelCase=400 , __lowerCAmelCase=0.0 , __lowerCAmelCase=False , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(
feature_size=__lowerCAmelCase , sampling_rate=__lowerCAmelCase , padding_value=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , **__lowerCAmelCase , )
lowerCAmelCase = n_fft
lowerCAmelCase = hop_length
lowerCAmelCase = chunk_length
lowerCAmelCase = chunk_length * sampling_rate
lowerCAmelCase = self.n_samples // hop_length
lowerCAmelCase = sampling_rate
lowerCAmelCase = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__lowerCAmelCase , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=__lowerCAmelCase , norm="""slaney""" , mel_scale="""slaney""" , )
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = spectrogram(
__lowerCAmelCase , window_function(self.n_fft , """hann""") , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="""log10""" , )
lowerCAmelCase = log_spec[:, :-1]
lowerCAmelCase = np.maximum(__lowerCAmelCase , log_spec.max() - 8.0)
lowerCAmelCase = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def a_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 0.0):
"""simple docstring"""
if attention_mask is not None:
lowerCAmelCase = np.array(__lowerCAmelCase , np.intaa)
lowerCAmelCase = []
for vector, length in zip(__lowerCAmelCase , attention_mask.sum(-1)):
lowerCAmelCase = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7)
if length < normed_slice.shape[0]:
lowerCAmelCase = padding_value
normed_input_values.append(__lowerCAmelCase)
else:
lowerCAmelCase = [(x - x.mean()) / np.sqrt(x.var() + 1E-7) for x in input_values]
return normed_input_values
def __call__( self , __lowerCAmelCase , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = "max_length" , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
f" was sampled with {self.sampling_rate} and not {sampling_rate}.")
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""")
lowerCAmelCase = isinstance(__lowerCAmelCase , np.ndarray) and len(raw_speech.shape) > 1
if is_batched_numpy and len(raw_speech.shape) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}")
lowerCAmelCase = is_batched_numpy or (
isinstance(__lowerCAmelCase , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list)))
)
if is_batched:
lowerCAmelCase = [np.asarray([speech] , dtype=np.floataa).T for speech in raw_speech]
elif not is_batched and not isinstance(__lowerCAmelCase , np.ndarray):
lowerCAmelCase = np.asarray(__lowerCAmelCase , dtype=np.floataa)
elif isinstance(__lowerCAmelCase , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa):
lowerCAmelCase = raw_speech.astype(np.floataa)
# always return batch
if not is_batched:
lowerCAmelCase = [np.asarray([raw_speech]).T]
lowerCAmelCase = BatchFeature({"""input_features""": raw_speech})
# convert into correct format for padding
lowerCAmelCase = self.pad(
__lowerCAmelCase , padding=__lowerCAmelCase , max_length=max_length if max_length else self.n_samples , truncation=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
lowerCAmelCase = self.zero_mean_unit_var_norm(
padded_inputs["""input_features"""] , attention_mask=padded_inputs["""attention_mask"""] , padding_value=self.padding_value , )
lowerCAmelCase = np.stack(padded_inputs["""input_features"""] , axis=0)
# make sure list is in array format
lowerCAmelCase = padded_inputs.get("""input_features""").transpose(2 , 0 , 1)
        lowerCAmelCase = [self._np_extract_fbank_features(waveform ) for waveform in input_features[0]]
if isinstance(input_features[0] , __lowerCAmelCase):
            lowerCAmelCase = [np.asarray(feature , dtype=np.floataa) for feature in input_features]
else:
lowerCAmelCase = input_features
if return_attention_mask:
            # rescale the attention mask from samples (480000 = 30s at 16 kHz) to features (3000)
lowerCAmelCase = padded_inputs["""attention_mask"""][:, :: self.hop_length]
if return_tensors is not None:
lowerCAmelCase = padded_inputs.convert_to_tensors(__lowerCAmelCase)
return padded_inputs
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = copy.deepcopy(self.__dict__)
lowerCAmelCase = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
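# --- Illustrative sketch added for clarity; not part of the original file ---
# The log-mel post-processing above clamps the log spectrogram to within 8 of
# its maximum and rescales it to roughly [-1, 1]. The same rule stand-alone:
import numpy as np

def normalize_log_mel(log_spec: np.ndarray) -> np.ndarray:
    log_spec = np.maximum(log_spec, log_spec.max() - 8.0)  # limit the dynamic range
    return (log_spec + 4.0) / 4.0

assert np.allclose(normalize_log_mel(np.array([-20.0, -6.0, 0.0])), [-1.0, -0.5, 1.0])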
| 272 | 0 |
'''simple docstring'''
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class __lowerCamelCase :
"""simple docstring"""
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[Any]=13 , SCREAMING_SNAKE_CASE : Optional[Any]=7 , SCREAMING_SNAKE_CASE : List[Any]=True , SCREAMING_SNAKE_CASE : Optional[Any]=True , SCREAMING_SNAKE_CASE : Any=99 , SCREAMING_SNAKE_CASE : List[Any]=32 , SCREAMING_SNAKE_CASE : Optional[Any]=5 , SCREAMING_SNAKE_CASE : Optional[int]=4 , SCREAMING_SNAKE_CASE : Tuple=37 , SCREAMING_SNAKE_CASE : int="gelu" , SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE : str=0.1 , SCREAMING_SNAKE_CASE : str=50 , SCREAMING_SNAKE_CASE : List[Any]=0.02 , SCREAMING_SNAKE_CASE : str=True , SCREAMING_SNAKE_CASE : Tuple=None , ):
_A : Optional[Any] = parent
_A : int = batch_size
_A : Any = seq_length
_A : str = is_training
_A : Any = use_input_mask
_A : List[Any] = vocab_size
_A : str = hidden_size
_A : Dict = num_hidden_layers
_A : Any = num_attention_heads
_A : Any = intermediate_size
_A : int = hidden_act
_A : Optional[int] = hidden_dropout_prob
_A : int = attention_probs_dropout_prob
_A : int = max_position_embeddings
_A : Union[str, Any] = initializer_range
_A : int = use_labels
_A : int = scope
def A ( self : Union[str, Any]):
_A : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_A : Tuple = None
if self.use_input_mask:
_A : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length])
if self.use_labels:
_A : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_A : Dict = self.get_config()
return config, input_ids, input_mask, token_labels
def A ( self : List[Any]):
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=__a , initializer_range=self.initializer_range , )
def A ( self : Any):
        config , input_ids , input_mask , token_labels = self.prepare_config_and_inputs()
_A : List[Any] = True
_A : Tuple = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
_A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def A ( self : Any , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Any , ):
_A : Union[str, Any] = BertGenerationEncoder(config=__a)
model.to(__a)
model.eval()
_A : Any = model(__a , attention_mask=__a)
_A : Union[str, Any] = model(__a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def A ( self : Dict , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : List[str] , ):
_A : Optional[Any] = True
_A : int = BertGenerationEncoder(config=__a)
model.to(__a)
model.eval()
_A : Dict = model(
__a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , )
_A : int = model(
__a , attention_mask=__a , encoder_hidden_states=__a , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def A ( self : Any , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Optional[Any] , ):
_A : Dict = True
_A : str = True
_A : Dict = BertGenerationDecoder(config=__a).to(__a).eval()
# first forward pass
_A : Any = model(
__a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , use_cache=__a , )
_A : Tuple = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
_A : int = ids_tensor((self.batch_size, 3) , config.vocab_size)
_A : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2)
        # append to next input_ids and attention mask
_A : Any = torch.cat([input_ids, next_tokens] , dim=-1)
_A : List[Any] = torch.cat([input_mask, next_mask] , dim=-1)
_A : List[Any] = model(
__a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , output_hidden_states=__a , )['hidden_states'][0]
_A : Dict = model(
__a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , past_key_values=__a , output_hidden_states=__a , )['hidden_states'][0]
# select random slice
_A : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1]).item()
_A : Dict = output_from_no_past[:, -3:, random_slice_idx].detach()
_A : str = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__a , __a , atol=1e-3))
def A ( self : Any , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[int] , *SCREAMING_SNAKE_CASE : Optional[Any] , ):
_A : Optional[int] = BertGenerationDecoder(__a)
model.to(__a)
model.eval()
_A : Union[str, Any] = model(__a , attention_mask=__a , labels=__a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A ( self : Tuple):
        config , input_ids , input_mask , token_labels = self.prepare_config_and_inputs()
_A : str = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
a = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
a = (BertGenerationDecoder,) if is_torch_available() else ()
a = (
{"""feature-extraction""": BertGenerationEncoder, """text-generation""": BertGenerationDecoder}
if is_torch_available()
else {}
)
def A ( self : List[str]):
_A : Optional[Any] = BertGenerationEncoderTester(self)
_A : Tuple = ConfigTester(self , config_class=__a , hidden_size=37)
def A ( self : int):
self.config_tester.run_common_tests()
def A ( self : Union[str, Any]):
_A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a)
def A ( self : Optional[Any]):
        config , input_ids , input_mask , token_labels = self.model_tester.prepare_config_and_inputs()
_A : Union[str, Any] = 'bert'
self.model_tester.create_and_check_model(__a , __a , __a , __a)
def A ( self : Union[str, Any]):
_A : Any = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__a)
def A ( self : List[Any]):
_A : int = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__a)
def A ( self : int):
# This regression test was failing with PyTorch < 1.3
        config , input_ids , input_mask , token_labels , encoder_hidden_states , encoder_attention_mask = self.model_tester.prepare_config_and_inputs_for_decoder()
_A : Optional[int] = None
self.model_tester.create_and_check_model_as_decoder(
__a , __a , __a , __a , __a , __a , )
def A ( self : Any):
_A : Any = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*__a)
@slow
def A ( self : str):
_A : str = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder')
self.assertIsNotNone(__a)
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def A ( self : Any):
_A : Tuple = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder')
_A : List[str] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
with torch.no_grad():
_A : Dict = model(__a)[0]
_A : Dict = torch.Size([1, 8, 1024])
self.assertEqual(output.shape , __a)
_A : Dict = torch.tensor(
[[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4))
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def A ( self : Tuple):
_A : List[str] = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder')
_A : Tuple = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
with torch.no_grad():
_A : Any = model(__a)[0]
_A : str = torch.Size([1, 8, 50358])
self.assertEqual(output.shape , __a)
_A : Any = torch.tensor(
[[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4))
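# --- Illustrative sketch added for clarity; not part of the original file ---
# The past_key_values test above runs the decoder once over the full sequence
# and once incrementally from a cache, then compares output slices. The same
# invariant with a toy "causal" op (cumulative sum) standing in for attention:
import torch

full = torch.arange(8.0)
no_cache = torch.cumsum(full, dim=0)[-3:]                    # recompute everything
cached_prefix = full[:5].sum()                               # state cached after 5 steps
with_cache = cached_prefix + torch.cumsum(full[5:], dim=0)   # extend from the cache
assert torch.allclose(no_cache, with_cache, atol=1e-3)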
| 368 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def lowerCAmelCase__ ( lowerCamelCase : Optional[int] ):
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class __lowerCamelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : int , SCREAMING_SNAKE_CASE : nn.Module , SCREAMING_SNAKE_CASE : int):
super().__init__()
_A : Tuple = module
_A : str = nn.Sequential(
nn.Linear(module.in_features , SCREAMING_SNAKE_CASE , bias=SCREAMING_SNAKE_CASE) , nn.Linear(SCREAMING_SNAKE_CASE , module.out_features , bias=SCREAMING_SNAKE_CASE) , )
_A : Any = (2.0 / (5 * min(module.in_features , module.out_features))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=SCREAMING_SNAKE_CASE)
nn.init.zeros_(self.adapter[1].weight)
self.adapter.to(module.weight.device)
def A ( self : Any , SCREAMING_SNAKE_CASE : Any , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : int):
return self.module(SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) + self.adapter(SCREAMING_SNAKE_CASE)
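# --- Illustrative sketch added for clarity; not part of the original file ---
# A working stand-alone version of the adapter idea above: a frozen base linear
# layer plus a trainable low-rank bottleneck whose output is added on top. The
# `up` projection starts at zero so the adapter is initially a no-op.
import torch
import torch.nn as nn

class TinyLoRA(nn.Module):
    def __init__(self, base: nn.Linear, rank: int = 4):
        super().__init__()
        self.base = base.requires_grad_(False)  # freeze the wrapped layer
        self.down = nn.Linear(base.in_features, rank, bias=False)
        self.up = nn.Linear(rank, base.out_features, bias=False)
        nn.init.zeros_(self.up.weight)

    def forward(self, hidden):
        return self.base(hidden) + self.up(self.down(hidden))

_tiny = TinyLoRA(nn.Linear(16, 16))
_tiny(torch.randn(2, 16)).sum().backward()
assert _tiny.up.weight.grad is not None and _tiny.base.weight.grad is None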
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
# We keep the constants inside the init function and model loading inside setUp function
    # We need to test on relatively large models (aka >1b parameters, otherwise the quantization may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
a = "bigscience/bloom-1b7"
# Constant values
a = 2.109_6595_5269_2574
a = "Hello my name is"
a = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
a = 10
def A ( self : List[Any]):
# Models and tokenizer
_A : List[Any] = AutoTokenizer.from_pretrained(self.model_name)
class __lowerCamelCase ( a_ ):
"""simple docstring"""
def A ( self : List[str]):
super().setUp()
# Models and tokenizer
_A : Tuple = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='auto')
_A : str = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE , device_map='auto')
def A ( self : List[Any]):
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def A ( self : str):
_A : Any = self.model_abit.config
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , 'quantization_config'))
_A : str = config.to_dict()
_A : Dict = config.to_diff_dict()
_A : int = config.to_json_string()
def A ( self : List[str]):
from bitsandbytes.nn import Paramsabit
_A : List[str] = self.model_fpaa.get_memory_footprint()
_A : List[Any] = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE)
_A : str = get_some_linear_layer(self.model_abit)
self.assertTrue(linear.weight.__class__ == Paramsabit)
def A ( self : List[Any]):
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(SCREAMING_SNAKE_CASE , torch.nn.Linear):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta)
def A ( self : Any):
_A : List[str] = self.tokenizer(self.input_text , return_tensors='pt')
_A : Any = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0) , max_new_tokens=10)
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=SCREAMING_SNAKE_CASE) , self.EXPECTED_OUTPUTS)
def A ( self : List[str]):
_A : List[Any] = BitsAndBytesConfig()
_A : Optional[int] = True
_A : Union[str, Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=SCREAMING_SNAKE_CASE , device_map='auto')
_A : int = self.tokenizer(self.input_text , return_tensors='pt')
_A : Any = model_abit_from_config.generate(
input_ids=encoded_input['input_ids'].to(0) , max_new_tokens=10)
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=SCREAMING_SNAKE_CASE) , self.EXPECTED_OUTPUTS)
def A ( self : Union[str, Any]):
with self.assertRaises(SCREAMING_SNAKE_CASE), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(SCREAMING_SNAKE_CASE)
def A ( self : int):
_A : Any = BitsAndBytesConfig()
with self.assertRaises(SCREAMING_SNAKE_CASE):
_A : Optional[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=SCREAMING_SNAKE_CASE , load_in_abit=SCREAMING_SNAKE_CASE , device_map='auto' , bnb_abit_quant_type='nf4' , )
def A ( self : Optional[Any]):
with self.assertRaises(SCREAMING_SNAKE_CASE):
# Tries with `str`
self.model_abit.to('cpu')
with self.assertRaises(SCREAMING_SNAKE_CASE):
# Tries with a `dtype``
self.model_abit.to(torch.floataa)
with self.assertRaises(SCREAMING_SNAKE_CASE):
# Tries with a `device`
self.model_abit.to(torch.device('cuda:0'))
with self.assertRaises(SCREAMING_SNAKE_CASE):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(SCREAMING_SNAKE_CASE):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
_A : str = self.tokenizer(self.input_text , return_tensors='pt')
_A : Optional[Any] = self.model_fpaa.to(torch.floataa)
_A : Optional[int] = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0) , max_new_tokens=10)
# Check this does not throw an error
_A : Tuple = self.model_fpaa.to('cpu')
# Check this does not throw an error
_A : Optional[int] = self.model_fpaa.half()
# Check this does not throw an error
_A : int = self.model_fpaa.float()
def A ( self : Dict):
_A : Any = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=SCREAMING_SNAKE_CASE , device_map='auto')
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def A ( cls : Union[str, Any]):
_A : Tuple = 't5-small'
_A : Union[str, Any] = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
_A : str = AutoTokenizer.from_pretrained(cls.model_name)
_A : Optional[int] = 'Translate in German: Hello, my dog is cute'
def A ( self : Any):
gc.collect()
torch.cuda.empty_cache()
def A ( self : List[str]):
from transformers import TaForConditionalGeneration
_A : Union[str, Any] = TaForConditionalGeneration._keep_in_fpaa_modules
_A : List[str] = None
# test with `t5-small`
_A : Optional[int] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE , device_map='auto')
_A : int = self.tokenizer(self.input_text , return_tensors='pt').to(0)
_A : List[str] = model.generate(**SCREAMING_SNAKE_CASE)
# test with `flan-t5-small`
_A : List[str] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=SCREAMING_SNAKE_CASE , device_map='auto')
_A : Dict = self.tokenizer(self.input_text , return_tensors='pt').to(0)
_A : Any = model.generate(**SCREAMING_SNAKE_CASE)
_A : List[str] = modules
def A ( self : Dict):
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
_A : str = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE , device_map='auto')
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit))
_A : int = self.tokenizer(self.input_text , return_tensors='pt').to(0)
_A : Union[str, Any] = model.generate(**SCREAMING_SNAKE_CASE)
# test with `flan-t5-small`
_A : Union[str, Any] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=SCREAMING_SNAKE_CASE , device_map='auto')
_A : str = self.tokenizer(self.input_text , return_tensors='pt').to(0)
_A : Optional[int] = model.generate(**SCREAMING_SNAKE_CASE)
class __lowerCamelCase ( a_ ):
"""simple docstring"""
def A ( self : Union[str, Any]):
super().setUp()
# model_name
_A : Dict = 'bigscience/bloom-560m'
_A : List[str] = 't5-small'
# Different types of model
_A : Optional[Any] = AutoModel.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE , device_map='auto')
# Sequence classification model
_A : Tuple = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=SCREAMING_SNAKE_CASE , device_map='auto')
# CausalLM model
_A : List[Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE , device_map='auto')
# Seq2seq model
_A : Dict = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=SCREAMING_SNAKE_CASE , device_map='auto')
def A ( self : str):
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def A ( self : Any):
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit)
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter)
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class __lowerCamelCase ( a_ ):
"""simple docstring"""
def A ( self : Optional[Any]):
super().setUp()
def A ( self : Any):
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def A ( self : Optional[int]):
_A : Dict = pipeline(
'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
_A : Union[str, Any] = self.pipe(self.input_text)
self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class __lowerCamelCase ( a_ ):
"""simple docstring"""
def A ( self : List[Any]):
super().setUp()
def A ( self : Any):
_A : int = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=SCREAMING_SNAKE_CASE , device_map='balanced')
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values()) , {0, 1})
# Check that inference pass works on the model
_A : Tuple = self.tokenizer(self.input_text , return_tensors='pt')
# Second real batch
_A : Union[str, Any] = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0) , max_new_tokens=10)
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=SCREAMING_SNAKE_CASE) , self.EXPECTED_OUTPUTS)
class __lowerCamelCase ( a_ ):
"""simple docstring"""
def A ( self : Optional[Any]):
_A : Any = 'facebook/opt-350m'
super().setUp()
def A ( self : List[Any]):
if version.parse(importlib.metadata.version('bitsandbytes')) < version.parse('0.37.0'):
return
# Step 1: freeze all parameters
_A : Any = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE)
self.assertEqual(set(model.hf_device_map.values()) , {torch.cuda.current_device()})
for param in model.parameters():
_A : Optional[Any] = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
_A : Tuple = param.data.to(torch.floataa)
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(SCREAMING_SNAKE_CASE)):
_A : List[Any] = LoRALayer(module.q_proj , rank=16)
_A : Union[str, Any] = LoRALayer(module.k_proj , rank=16)
_A : str = LoRALayer(module.v_proj , rank=16)
# Step 3: dummy batch
_A : Union[str, Any] = self.tokenizer('Test batch ' , return_tensors='pt').to(0)
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
_A : Union[str, Any] = model.forward(**SCREAMING_SNAKE_CASE)
out.logits.norm().backward()
for module in model.modules():
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE):
self.assertTrue(module.adapter[1].weight.grad is not None)
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
elif isinstance(SCREAMING_SNAKE_CASE , nn.Embedding):
self.assertTrue(module.weight.grad is None)
class __lowerCamelCase ( a_ ):
"""simple docstring"""
a = "gpt2-xl"
a = 3.3191_8548_5415_2187
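# --- Illustrative sketch added for clarity; not part of the original file ---
# The suites above exercise 4-bit loading. A typical setup is sketched below
# (kept as comments because it needs a GPU and a model download); the model
# name and option values here are examples, not taken from this file:
# import torch
# from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#
# quant_config = BitsAndBytesConfig(
#     load_in_4bit=True,
#     bnb_4bit_quant_type="nf4",             # normal-float 4-bit
#     bnb_4bit_compute_dtype=torch.float16,  # dtype used for matmuls
# )
# model = AutoModelForCausalLM.from_pretrained(
#     "bigscience/bloom-560m", quantization_config=quant_config, device_map="auto"
# )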
| 227 | 0 |
"""simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
class UpperCamelCase ( lowercase_ ):
lowercase = ['input_ids', 'attention_mask']
def __init__( self ,__UpperCamelCase="</s>" ,__UpperCamelCase="<unk>" ,__UpperCamelCase="<pad>" ,__UpperCamelCase=125 ,__UpperCamelCase=None ,**__UpperCamelCase ,) -> None:
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
lowercase_ : Union[str, Any] = [f'''<extra_id_{i}>''' for i in range(__UpperCamelCase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
lowercase_ : Tuple = len(set(filter(lambda __UpperCamelCase : bool('extra_id' in str(__UpperCamelCase ) ) ,__UpperCamelCase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
' provided to ByT5Tokenizer. In this case the additional_special_tokens must include the'
' extra_ids tokens' )
lowercase_ : Any = AddedToken(__UpperCamelCase ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else pad_token
lowercase_ : Optional[Any] = AddedToken(__UpperCamelCase ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else eos_token
lowercase_ : List[str] = AddedToken(__UpperCamelCase ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else unk_token
super().__init__(
eos_token=__UpperCamelCase ,unk_token=__UpperCamelCase ,pad_token=__UpperCamelCase ,extra_ids=__UpperCamelCase ,additional_special_tokens=__UpperCamelCase ,**__UpperCamelCase ,)
lowercase_ : str = extra_ids
        lowercase_ : List[Any] = 2**8  # UTF-8 code units are 8 bits
# define special tokens dict
lowercase_ : Dict[int, str] = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
lowercase_ : Any = len(self.special_tokens_encoder )
lowercase_ : List[Any] = len(__UpperCamelCase )
for i, token in enumerate(__UpperCamelCase ):
lowercase_ : int = self.vocab_size + i - n
lowercase_ : Dict[str, int] = {v: k for k, v in self.special_tokens_encoder.items()}
@property
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCamelCase ,token_ids_a=__UpperCamelCase ,already_has_special_tokens=__UpperCamelCase )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(__UpperCamelCase )) + [1]
return ([0] * len(__UpperCamelCase )) + [1] + ([0] * len(__UpperCamelCase )) + [1]
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[int]:
'''simple docstring'''
if len(__UpperCamelCase ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
' eos tokens being added.' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]:
'''simple docstring'''
lowercase_ : str = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]:
'''simple docstring'''
lowercase_ : str = self._add_eos_if_not_present(__UpperCamelCase )
if token_ids_a is None:
return token_ids_a
else:
lowercase_ : int = self._add_eos_if_not_present(__UpperCamelCase )
return token_ids_a + token_ids_a
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]:
'''simple docstring'''
        lowercase_ : Optional[int] = [chr(i ) for i in text.encode('utf-8' )]
return tokens
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[Any]:
'''simple docstring'''
if token in self.special_tokens_encoder:
lowercase_ : Tuple = self.special_tokens_encoder[token]
elif token in self.added_tokens_encoder:
lowercase_ : Dict = self.added_tokens_encoder[token]
elif len(__UpperCamelCase ) != 1:
lowercase_ : Dict = self.unk_token_id
else:
lowercase_ : List[str] = ord(__UpperCamelCase ) + self._num_special_tokens
return token_id
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Any:
'''simple docstring'''
if index in self.special_tokens_decoder:
lowercase_ : int = self.special_tokens_decoder[index]
else:
lowercase_ : int = chr(index - self._num_special_tokens )
return token
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ : Dict = B''
for token in tokens:
if token in self.special_tokens_decoder:
lowercase_ : Any = self.special_tokens_decoder[token].encode('utf-8' )
elif token in self.added_tokens_decoder:
lowercase_ : int = self.special_tokens_decoder[token].encode('utf-8' )
elif token in self.special_tokens_encoder:
lowercase_ : Optional[int] = token.encode('utf-8' )
elif token in self.added_tokens_encoder:
lowercase_ : List[Any] = token.encode('utf-8' )
else:
                lowercase_ : List[str] = bytes([ord(token )] )
bstring += tok_string
lowercase_ : Optional[int] = bstring.decode('utf-8' ,errors='ignore' )
return string
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Tuple[str]:
'''simple docstring'''
return ()
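# --- Illustrative sketch added for clarity; not part of the original file ---
# ByT5-style tokenization maps every UTF-8 byte to `byte_value + num_special_tokens`
# (3 here: pad=0, eos=1, unk=2). A stand-alone round trip of that rule:
NUM_SPECIAL_TOKENS = 3

def byte_encode(text):
    return [b + NUM_SPECIAL_TOKENS for b in text.encode("utf-8")]

def byte_decode(ids):
    return bytes(i - NUM_SPECIAL_TOKENS for i in ids).decode("utf-8", errors="ignore")

assert byte_encode("hi") == [107, 108]  # ord("h") + 3, ord("i") + 3
assert byte_decode(byte_encode("héllo")) == "héllo"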
| 213 |
"""simple docstring"""
import argparse
import struct
import unittest
class UpperCamelCase :
def __init__( self ,__UpperCamelCase ) -> None:
'''simple docstring'''
lowercase_ : str = data
# Initialize hash values
lowercase_ : Optional[int] = [
0X6_A_0_9_E_6_6_7,
0XB_B_6_7_A_E_8_5,
0X3_C_6_E_F_3_7_2,
0XA_5_4_F_F_5_3_A,
0X5_1_0_E_5_2_7_F,
0X9_B_0_5_6_8_8_C,
0X1_F_8_3_D_9_A_B,
0X5_B_E_0_C_D_1_9,
]
# Initialize round constants
lowercase_ : Tuple = [
0X4_2_8_A_2_F_9_8,
0X7_1_3_7_4_4_9_1,
0XB_5_C_0_F_B_C_F,
0XE_9_B_5_D_B_A_5,
0X3_9_5_6_C_2_5_B,
0X5_9_F_1_1_1_F_1,
0X9_2_3_F_8_2_A_4,
0XA_B_1_C_5_E_D_5,
0XD_8_0_7_A_A_9_8,
0X1_2_8_3_5_B_0_1,
0X2_4_3_1_8_5_B_E,
0X5_5_0_C_7_D_C_3,
0X7_2_B_E_5_D_7_4,
0X8_0_D_E_B_1_F_E,
0X9_B_D_C_0_6_A_7,
0XC_1_9_B_F_1_7_4,
0XE_4_9_B_6_9_C_1,
0XE_F_B_E_4_7_8_6,
0X0_F_C_1_9_D_C_6,
0X2_4_0_C_A_1_C_C,
0X2_D_E_9_2_C_6_F,
0X4_A_7_4_8_4_A_A,
0X5_C_B_0_A_9_D_C,
0X7_6_F_9_8_8_D_A,
0X9_8_3_E_5_1_5_2,
0XA_8_3_1_C_6_6_D,
0XB_0_0_3_2_7_C_8,
0XB_F_5_9_7_F_C_7,
0XC_6_E_0_0_B_F_3,
0XD_5_A_7_9_1_4_7,
0X0_6_C_A_6_3_5_1,
0X1_4_2_9_2_9_6_7,
0X2_7_B_7_0_A_8_5,
0X2_E_1_B_2_1_3_8,
0X4_D_2_C_6_D_F_C,
0X5_3_3_8_0_D_1_3,
0X6_5_0_A_7_3_5_4,
0X7_6_6_A_0_A_B_B,
0X8_1_C_2_C_9_2_E,
0X9_2_7_2_2_C_8_5,
0XA_2_B_F_E_8_A_1,
0XA_8_1_A_6_6_4_B,
0XC_2_4_B_8_B_7_0,
0XC_7_6_C_5_1_A_3,
0XD_1_9_2_E_8_1_9,
0XD_6_9_9_0_6_2_4,
0XF_4_0_E_3_5_8_5,
0X1_0_6_A_A_0_7_0,
0X1_9_A_4_C_1_1_6,
0X1_E_3_7_6_C_0_8,
0X2_7_4_8_7_7_4_C,
0X3_4_B_0_B_C_B_5,
0X3_9_1_C_0_C_B_3,
0X4_E_D_8_A_A_4_A,
0X5_B_9_C_C_A_4_F,
0X6_8_2_E_6_F_F_3,
0X7_4_8_F_8_2_E_E,
0X7_8_A_5_6_3_6_F,
0X8_4_C_8_7_8_1_4,
0X8_C_C_7_0_2_0_8,
0X9_0_B_E_F_F_F_A,
0XA_4_5_0_6_C_E_B,
0XB_E_F_9_A_3_F_7,
0XC_6_7_1_7_8_F_2,
]
lowercase_ : Tuple = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def _UpperCAmelCase ( __UpperCamelCase ) -> bytes:
'''simple docstring'''
lowercase_ : str = B'\x80' + (B'\x00' * (63 - (len(__UpperCamelCase ) + 8) % 64))
lowercase_ : str = struct.pack('>Q' ,(len(__UpperCamelCase ) * 8) )
return data + padding + big_endian_integer
def _UpperCAmelCase ( self ) -> None:
'''simple docstring'''
lowercase_ : Optional[Any] = [
self.preprocessed_data[x : x + 64]
for x in range(0 ,len(self.preprocessed_data ) ,64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
            lowercase_ : Any = list(struct.unpack('>16L' ,block ) )
            # extend with 48 zeroed integers to complete the 64-word message schedule
words += [0] * 48
            a , b , c , d , e , f , g , h = self.hashes
for index in range(0 ,64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
lowercase_ : str = (
self.ror(words[index - 15] ,7 )
^ self.ror(words[index - 15] ,18 )
^ (words[index - 15] >> 3)
)
lowercase_ : int = (
self.ror(words[index - 2] ,17 )
^ self.ror(words[index - 2] ,19 )
^ (words[index - 2] >> 10)
)
lowercase_ : Optional[Any] = (
words[index - 16] + sa + words[index - 7] + sa
) % 0X1_0_0_0_0_0_0_0_0
# Compression
lowercase_ : Tuple = self.ror(__UpperCamelCase ,6 ) ^ self.ror(__UpperCamelCase ,11 ) ^ self.ror(__UpperCamelCase ,25 )
lowercase_ : Union[str, Any] = (e & f) ^ ((~e & 0XF_F_F_F_F_F_F_F) & g)
lowercase_ : str = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0X1_0_0_0_0_0_0_0_0
lowercase_ : Optional[int] = self.ror(__UpperCamelCase ,2 ) ^ self.ror(__UpperCamelCase ,13 ) ^ self.ror(__UpperCamelCase ,22 )
lowercase_ : Optional[Any] = (a & b) ^ (a & c) ^ (b & c)
lowercase_ : Any = (sa + maj) % 0X1_0_0_0_0_0_0_0_0
                h , g , f , e , d , c , b , a = (
g,
f,
e,
((d + tempa) % 0X1_0_0_0_0_0_0_0_0),
c,
b,
a,
((tempa + tempa) % 0X1_0_0_0_0_0_0_0_0),
)
lowercase_ : str = [a, b, c, d, e, f, g, h]
# Modify final values
lowercase_ : Dict = [
((element + mutated_hash_values[index]) % 0X1_0_0_0_0_0_0_0_0)
for index, element in enumerate(self.hashes )
]
        lowercase_ : Any = ''.join([hex(value )[2:].zfill(8 ) for value in self.hashes] )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> int:
'''simple docstring'''
return 0XF_F_F_F_F_F_F_F & (value << (32 - rotations)) | (value >> rotations)
class UpperCamelCase ( unittest.TestCase ):
def _UpperCAmelCase ( self ) -> None:
'''simple docstring'''
import hashlib
lowercase_ : Union[str, Any] = bytes('Test String' ,'utf-8' )
        self.assertEqual(SHAaaa(lowercase_ ).hash ,hashlib.sha256(lowercase_ ).hexdigest() )
def lowercase__( ):
import doctest
doctest.testmod()
lowercase_ : Tuple = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
lowercase_ : Any = parser.parse_args()
lowercase_ : int = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
lowercase_ : str = f.read()
else:
lowercase_ : Optional[int] = bytes(__SCREAMING_SNAKE_CASE , 'utf-8' )
print(SHAaaa(__SCREAMING_SNAKE_CASE ).hash )
if __name__ == "__main__":
main()
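# --- Illustrative sketch added for clarity; not part of the original file ---
# The `preprocessing` method above implements the SHA-256 padding rule: append
# 0x80, zero-pad until the length is 56 mod 64, then append the original length
# in bits as a big-endian 64-bit integer. The same rule stand-alone:
import struct

def sha256_pad(data: bytes) -> bytes:
    padding = b"\x80" + b"\x00" * (63 - (len(data) + 8) % 64)
    return data + padding + struct.pack(">Q", len(data) * 8)

assert len(sha256_pad(b"abc")) % 64 == 0  # padded input is whole 512-bit blocks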
| 213 | 1 |
"""simple docstring"""
from __future__ import annotations
def _snake_case ( matrix : list[list[int]] ) -> int:
    '''
    Return the minimum path sum from the top-left to the bottom-right cell,
    moving only right or down. The input matrix is updated in place.

    >>> _snake_case([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
    7
    '''
    # preprocessing the first row
    for i in range(1 , len(matrix[0] ) ):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1 , len(matrix ) ):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1 , len(matrix ) ):
        for j in range(1 , len(matrix[0] ) ):
            matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 271 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
a = logging.getLogger(__name__)
@dataclass
class lowercase_ :
'''simple docstring'''
UpperCAmelCase : str
UpperCAmelCase : List[str]
UpperCAmelCase : Optional[List[str]]
@dataclass
class lowercase_ :
'''simple docstring'''
UpperCAmelCase : List[int]
UpperCAmelCase : List[int]
UpperCAmelCase : Optional[List[int]] = None
UpperCAmelCase : Optional[List[int]] = None
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : Any = '''train'''
UpperCAmelCase : Tuple = '''dev'''
UpperCAmelCase : int = '''test'''
class lowercase_ :
'''simple docstring'''
@staticmethod
def lowerCAmelCase_ ( _UpperCAmelCase : int , _UpperCAmelCase : Union[Split, str] ):
raise NotImplementedError
@staticmethod
def lowerCAmelCase_ ( _UpperCAmelCase : str ):
raise NotImplementedError
@staticmethod
def lowerCAmelCase_ ( _UpperCAmelCase : List[InputExample] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : PreTrainedTokenizer , _UpperCAmelCase : List[str]=False , _UpperCAmelCase : List[str]="[CLS]" , _UpperCAmelCase : List[Any]=1 , _UpperCAmelCase : Tuple="[SEP]" , _UpperCAmelCase : List[str]=False , _UpperCAmelCase : Optional[Any]=False , _UpperCAmelCase : str=0 , _UpperCAmelCase : Optional[int]=0 , _UpperCAmelCase : Any=-100 , _UpperCAmelCase : Union[str, Any]=0 , _UpperCAmelCase : List[Any]=True , ):
_A = {label: i for i, label in enumerate(_UpperCAmelCase )}
_A = []
for ex_index, example in enumerate(_UpperCAmelCase ):
if ex_index % 10_000 == 0:
logger.info('Writing example %d of %d' , _UpperCAmelCase , len(_UpperCAmelCase ) )
_A = []
_A = []
for word, label in zip(example.words , example.labels ):
                _A = tokenizer.tokenize(word )
                # bert-base-multilingual-cased sometimes outputs "nothing" ([]) when calling tokenize with just a space.
if len(_UpperCAmelCase ) > 0:
tokens.extend(_UpperCAmelCase )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(_UpperCAmelCase ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
_A = tokenizer.num_special_tokens_to_add()
if len(_UpperCAmelCase ) > max_seq_length - special_tokens_count:
_A = tokens[: (max_seq_length - special_tokens_count)]
_A = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
_A = [sequence_a_segment_id] * len(_UpperCAmelCase )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
_A = [cls_token] + tokens
_A = [pad_token_label_id] + label_ids
_A = [cls_token_segment_id] + segment_ids
_A = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
_A = [1 if mask_padding_with_zero else 0] * len(_UpperCAmelCase )
# Zero-pad up to the sequence length.
_A = max_seq_length - len(_UpperCAmelCase )
if pad_on_left:
_A = ([pad_token] * padding_length) + input_ids
_A = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
_A = ([pad_token_segment_id] * padding_length) + segment_ids
_A = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(_UpperCAmelCase ) == max_seq_length
assert len(_UpperCAmelCase ) == max_seq_length
assert len(_UpperCAmelCase ) == max_seq_length
assert len(_UpperCAmelCase ) == max_seq_length
if ex_index < 5:
logger.info('*** Example ***' )
logger.info('guid: %s' , example.guid )
                logger.info('tokens: %s' , ' '.join([str(x ) for x in tokens] ) )
                logger.info('input_ids: %s' , ' '.join([str(x ) for x in input_ids] ) )
                logger.info('input_mask: %s' , ' '.join([str(x ) for x in input_mask] ) )
                logger.info('segment_ids: %s' , ' '.join([str(x ) for x in segment_ids] ) )
                logger.info('label_ids: %s' , ' '.join([str(x ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
_A = None
features.append(
InputFeatures(
input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , label_ids=_UpperCAmelCase ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : List[InputFeatures]
UpperCAmelCase : int = nn.CrossEntropyLoss().ignore_index
def __init__( self : int , _UpperCAmelCase : TokenClassificationTask , _UpperCAmelCase : str , _UpperCAmelCase : PreTrainedTokenizer , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[Any]=False , _UpperCAmelCase : Split = Split.train , ):
# Load data features from cache or dataset file
_A = os.path.join(
_UpperCAmelCase , 'cached_{}_{}_{}'.format(mode.value , tokenizer.__class__.__name__ , str(_UpperCAmelCase ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_A = cached_features_file + '.lock'
with FileLock(_UpperCAmelCase ):
if os.path.exists(_UpperCAmelCase ) and not overwrite_cache:
logger.info(F'''Loading features from cached file {cached_features_file}''' )
_A = torch.load(_UpperCAmelCase )
else:
logger.info(F'''Creating features from dataset file at {data_dir}''' )
_A = token_classification_task.read_examples_from_file(_UpperCAmelCase , _UpperCAmelCase )
# TODO clean up all this to leverage built-in features of tokenizers
_A = token_classification_task.convert_examples_to_features(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , cls_token_at_end=bool(model_type in ['xlnet'] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['xlnet'] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_UpperCAmelCase , pad_on_left=bool(tokenizer.padding_side == 'left' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(F'''Saving features into cached file {cached_features_file}''' )
torch.save(self.features , _UpperCAmelCase )
def __len__( self : Dict ):
return len(self.features )
def __getitem__( self : int , _UpperCAmelCase : Union[str, Any] ):
return self.features[i]
if is_tf_available():
import tensorflow as tf
class lowercase_ :
'''simple docstring'''
UpperCAmelCase : List[InputFeatures]
UpperCAmelCase : int = -100
def __init__( self : int , _UpperCAmelCase : TokenClassificationTask , _UpperCAmelCase : str , _UpperCAmelCase : PreTrainedTokenizer , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[Any]=False , _UpperCAmelCase : Split = Split.train , ):
_A = token_classification_task.read_examples_from_file(_UpperCAmelCase , _UpperCAmelCase )
# TODO clean up all this to leverage built-in features of tokenizers
_A = token_classification_task.convert_examples_to_features(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , cls_token_at_end=bool(model_type in ['xlnet'] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['xlnet'] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_UpperCAmelCase , pad_on_left=bool(tokenizer.padding_side == 'left' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
        if "token_type_ids" not in tokenizer.model_input_names:
            self.dataset = tf.data.Dataset.from_generator(
                gen, ({'input_ids': tf.int32, 'attention_mask': tf.int32}, tf.int64), (
                    {'input_ids': tf.TensorShape([None] ), 'attention_mask': tf.TensorShape([None] )},
                    tf.TensorShape([None] ),
                ), )
        else:
            self.dataset = tf.data.Dataset.from_generator(
                gen, ({'input_ids': tf.int32, 'attention_mask': tf.int32, 'token_type_ids': tf.int32}, tf.int64), (
                    {
                        'input_ids': tf.TensorShape([None] ),
                        'attention_mask': tf.TensorShape([None] ),
                        'token_type_ids': tf.TensorShape([None] ),
                    },
                    tf.TensorShape([None] ),
                ), )
    def get_dataset(self):
        self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self : Tuple ):
return len(self.features )
    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]
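# Usage sketch (added; illustrative only -- `NER` stands in for a concrete
# TokenClassificationTask subclass that is not defined in this file):
#
#   dataset = TokenClassificationDataset(
#       token_classification_task=NER(),
#       data_dir="./data",
#       tokenizer=tokenizer,
#       labels=["O", "B-PER", "I-PER"],
#       model_type="bert",
#       max_seq_length=128,
#   )
#   features = dataset[0]  # an InputFeatures instance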
| 271 | 1 |
from heapq import heappop, heappush
import numpy as np
def dijkstra(grid, source, destination, allow_diagonal):
    """Return the cost of the cheapest path between `source` and `destination`
    on a binary grid (cells equal to 1 are walkable), together with the path."""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
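# Example (added, illustrative): on a 3x3 grid of ones, the cheapest path from
# (0, 0) to (2, 2) without diagonal moves takes 4 unit steps:
#
#   grid = np.ones((3, 3), dtype=int)
#   dist, path = dijkstra(grid, (0, 0), (2, 2), allow_diagonal=False)
#   # dist == 4.0 and path has 5 cells, source and destination included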
if __name__ == "__main__":
import doctest
doctest.testmod()
| 348 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ):
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 18, 'width': 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42})
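    # Note (added): `from_dict` gives explicit keyword arguments precedence over
    # the values stored in the serialized dict, which is why passing `size=42`
    # above overrides the original {"height": 18, "width": 18}.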
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
| 249 | 0 |
"""simple docstring"""
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar("T")
def get_parent_position(position: int) -> int:
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    return (2 * position) + 2
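# Index arithmetic sketch (added): with the heap laid out in a flat list, the
# node at index 4 has its parent at (4 - 1) // 2 == 1 and its children at
# indices 9 and 10.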
class MinPriorityQueue(Generic[T]):
    """Minimum priority queue keyed by weight, backed by a binary heap."""

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
        else:
            return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted(Generic[T]):
    """Undirected, weighted graph stored as an adjacency map."""

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(graph: GraphUndirectedWeighted[T]) -> tuple[dict[T, int], dict[T, T | None]]:
    """Run Prim's algorithm and return the distance and parent maps."""
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
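if __name__ == "__main__":
    # Minimal usage sketch (added; not part of the original module). A triangle
    # graph: the algorithm keeps edges a-b (2) and b-c (3) and drops a-c (10).
    graph = GraphUndirectedWeighted()
    graph.add_edge("a", "b", 2)
    graph.add_edge("b", "c", 3)
    graph.add_edge("a", "c", 10)
    dist, parent = prims_algo(graph)
    print(dist)  # expected: {'a': 0, 'b': 2, 'c': 5}
    print(parent)  # expected: {'a': None, 'b': 'a', 'c': 'b'}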
| 356 |
"""simple docstring"""
import math
def jump_search(arr, x):
    """Find the index of `x` in sorted `arr` using jump search, or -1."""
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1

    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
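# Quick sanity check (added, illustrative): in [0, 1, 2, 8, 13, 17, 19, 32, 42]
# the value 13 sits at index 4, so jump_search(arr, 13) returns 4, while a
# missing value such as 5 returns -1.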
if __name__ == "__main__":
lowerCAmelCase__ = input('''Enter numbers separated by a comma:\n''').strip()
lowerCAmelCase__ = [int(item) for item in user_input.split(''',''')]
lowerCAmelCase__ = int(input('''Enter the number to be searched:\n'''))
lowerCAmelCase__ = jump_search(arr, x)
if res == -1:
print('''Number not found!''')
else:
print(f'''Number {x} is at index {res}''')
| 244 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
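# Sketch of the helper above (added): given `input_ids` containing pad tokens,
# the derived mask is 1 for real tokens and 0 for padding, e.g. with
# pad_token_id=1, input_ids [[5, 6, 1]] yields attention_mask [[1, 1, 0]].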
@require_tf
class TFOPTModelTester:
    """simple docstring"""

    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, embed_dim=16, word_embed_proj_dim=16, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        config = self.config_cls(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=False , **self.config_updates , )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
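    # Design note (added): the slice comparison above is the standard way to
    # verify KV-cache correctness -- a forward pass over the full sequence and an
    # incremental pass that reuses `past_key_values` must agree on the logits for
    # the newly appended tokens, up to numerical tolerance.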
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': TFOPTModel, 'text-generation': TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10
    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    """simple docstring"""

    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    """simple docstring"""

    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]])
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    """simple docstring"""

    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)
        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ])
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    """simple docstring"""

    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)

    def test_batch_generation(self):
        model_id = "facebook/opt-350m"
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])
        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64))
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
| 100 |
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`List[float]`): Predicted labels, as returned by a model.\n    references (`List[float]`): Ground truth labels.\n    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n        only the spearmanr score. Defaults to `False`.\nReturns:\n    spearmanr (`float`): Spearman correlation coefficient.\n    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n    Example 1:\n        >>> spearmanr_metric = datasets.load_metric("spearmanr")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n        >>> print(results)\n        {\'spearmanr\': -0.7}\n\n    Example 2:\n        >>> spearmanr_metric = datasets.load_metric("spearmanr")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n        ...                                     predictions=[10, 9, 2.5, 6, 4],\n        ...                                     return_pvalue=True)\n        >>> print(results[\'spearmanr\'])\n        -0.7\n        >>> print(round(results[\'spearmanr_pvalue\'], 2))\n        0.19\n'
_CITATION = r'\\n@book{kokoska2000crc,\n  title={CRC standard probability and statistics tables and formulae},\n  author={Kokoska, Stephen and Zwillinger, Daniel},\n  year={2000},\n  publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n  author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n             Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n             Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n             Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n             Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n             Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n             Kern, Robert and Larson, Eric and Carey, C J and\n             Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n             {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n             Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n             Harris, Charles R. and Archibald, Anne M. and\n             Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n             {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n  title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n             Computing in Python}},\n  journal = {Nature Methods},\n  year    = {2020},\n  volume  = {17},\n  pages   = {261--272},\n  adsurl  = {https://rdcu.be/b08Wh},\n  doi     = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    """simple docstring"""

    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ), reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"], )
    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
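    # Sanity sketch (added): the metric is a thin wrapper around scipy, so
    # spearmanr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4]) yielding -0.7 matches the
    # docstring example above.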
| 17 | 0 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
    def setUp(self):
super().setUp()
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"こんにちは",
"こん",
"にちは",
"ばんは",
"##こん",
"##にちは",
"##ばんは",
"世界",
"##世界",
"、",
"##、",
"。",
"##。",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
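    # Note (added): the vocab file written above is the standard WordPiece
    # format, one token per line; a token's id is its line index, which is why
    # the assertions below expect ids such as 3 for "こんにちは".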
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
return input_text, output_text
    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_pretokenized_inputs(self):
pass # TODO add if relevant
    def test_maximum_encoding_length_pair_input(self):
pass # TODO add if relevant
    def test_maximum_encoding_length_single_input(self):
pass # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。")
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])
    def test_pickle_mecab_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="mecab")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)
        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)
        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)
    def test_mecab_tokenizer_ipadic(self):
        tokenizer = MecabTokenizer(mecab_dic="ipadic")
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
    def test_mecab_tokenizer_unidic_lite(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic_lite")
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
    def test_mecab_tokenizer_unidic(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic")
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
    def test_mecab_tokenizer_lower(self):
        tokenizer = MecabTokenizer(do_lower_case=True, mecab_dic="ipadic")
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
    def test_mecab_tokenizer_with_option(self):
        try:
            # booleans chosen to match the expected output below ("iPhone" kept
            # as-is, ideographic space preserved)
            tokenizer = MecabTokenizer(
                do_lower_case=False, normalize_text=False, mecab_option="-d /usr/local/lib/mecab/dic/jumandic")
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"] , )
    def test_mecab_tokenizer_no_normalize(self):
        tokenizer = MecabTokenizer(normalize_text=False, mecab_dic="ipadic")
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"] , )
@require_sudachi
    def test_pickle_sudachi_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="sudachi")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)
        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)
        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)
@require_sudachi
    def test_sudachi_tokenizer_core(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core")
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
    def test_sudachi_tokenizer_split_mode_A(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="A")
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国", "人", "参政", "権"] )
@require_sudachi
    def test_sudachi_tokenizer_split_mode_B(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="B")
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人", "参政権"] )
@require_sudachi
    def test_sudachi_tokenizer_split_mode_C(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="C")
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人参政権"] )
@require_sudachi
    def test_sudachi_tokenizer_lower(self):
        tokenizer = SudachiTokenizer(do_lower_case=True, sudachi_dict_type="core")
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
    def test_sudachi_tokenizer_no_normalize(self):
        tokenizer = SudachiTokenizer(normalize_text=False, sudachi_dict_type="core")
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "] , )
@require_sudachi
    def test_sudachi_tokenizer_trim_whitespace(self):
        tokenizer = SudachiTokenizer(trim_whitespace=True, sudachi_dict_type="core")
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
@require_jumanpp
    def test_pickle_jumanpp_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="jumanpp")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)
        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)
        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)
@require_jumanpp
    def test_jumanpp_tokenizer(self):
        tokenizer = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
    def test_jumanpp_tokenizer_lower(self):
        tokenizer = JumanppTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
    def test_jumanpp_tokenizer_no_normalize(self):
        tokenizer = JumanppTokenizer(normalize_text=False)
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
    def test_jumanpp_tokenizer_trim_whitespace(self):
        tokenizer = JumanppTokenizer(trim_whitespace=True)
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"] , )
@require_jumanpp
    def test_jumanpp_tokenizer_ext(self):
        tokenizer = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。" ) , ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"] , )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こんにちは"])
        self.assertListEqual(tokenizer.tokenize("こんばんは"), ["こん", "##ばんは"])
        self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは"), ["こん", "##ばんは", "[UNK]", "こんにちは"])
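    # The greedy longest-match-first behaviour shown above (added note) is why
    # "こんばんは" splits into "こん" + "##ばんは": the full string is not in the
    # vocab, so the tokenizer takes the longest known prefix and marks the rest
    # as a continuation piece with "##".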
    def test_sentencepiece_tokenizer(self):
        tokenizer = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
        subword_tokenizer = tokenizer.subword_tokenizer

        tokens = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。")
        self.assertListEqual(tokens, ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"])

        tokens = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは")
        self.assertListEqual(tokens, ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"])
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
return input_text, output_text
    def test_pretokenized_inputs(self):
pass # TODO add if relevant
    def test_maximum_encoding_length_pair_input(self):
pass # TODO add if relevant
    def test_maximum_encoding_length_single_input(self):
pass # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character")

        tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
        self.assertListEqual(
            tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12])
    def test_character_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こ", "ん", "に", "ち", "は"])
        self.assertListEqual(tokenizer.tokenize("こんにちほ"), ["こ", "ん", "に", "ち", "[UNK]"])
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    """simple docstring"""

    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)
class BertTokenizerMismatchTest(unittest.TestCase):
    """simple docstring"""

    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
self.assertTrue(
cm.records[0].message.startswith(
"The tokenizer class you load from this checkpoint is not the same type as the class this function"
" is called from." ) )
        EXAMPLE_BERT_ID = "bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
self.assertTrue(
cm.records[0].message.startswith(
"The tokenizer class you load from this checkpoint is not the same type as the class this function"
" is called from." ) )
| 305 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(train_dt, train_usr, train_mtch, test_dt, test_mtch):
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    # ordinary least squares via the normal equation
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] * beta[2])
def sarimax_predictor(train_user, train_match, test_match):
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(
        train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]
def support_vector_regressor(x_train, x_test, train_user):
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]
def interquartile_range_checker(train_user):
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim
def data_safety_checker(list_vote, actual_result):
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
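# Worked example (added): data_safety_checker([2.0, 3.0, 4.0], 5.0) is False,
# since every vote misses the actual value by more than 0.1, whereas
# data_safety_checker([5.05, 5.0, 4.95], 5.0) is True (two of three votes fall
# within the 0.1 tolerance and outvote the single overshoot).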
if __name__ == "__main__":
    # data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=["total_user", "total_even", "days"]
    )

    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(
            trn_date, trn_user, trn_match, tst_date, tst_match
        ),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data (tst_user is a one-element list, so
    # pass the scalar value to the checker)
    not_str = "" if data_safety_checker(res_vote, tst_user[0]) else "not "
    print(f"Today's data is {not_str}safe.")
| 305 | 1 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
raise Exception("requires fairseq >= 1.0.0a")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"
def convert_xlm_roberta_xl_checkpoint_to_pytorch(roberta_checkpoint_path, pytorch_dump_folder_path, classification_head):
    """
    Copy/paste/tweak roberta's weights to our BERT structure.
    """
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , )
    if classification_head:
        config.num_labels = roberta.model.classification_heads['mnli'].out_proj.weight.shape[0]

    print('Our RoBERTa config:' , config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers ):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size) )
        )

        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads['mnli'].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads['mnli'].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads['mnli'].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads['mnli'].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = roberta.encode(SAMPLE_TEXT).unsqueeze(0 )  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads['mnli'](roberta.extract_features(input_ids) )
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape , their_output.shape )
    max_absolute_diff = torch.max(torch.abs(our_output - their_output) ).item()
    print(f'max_absolute_diff = {max_absolute_diff}' )  # ~ 1e-7
    success = torch.allclose(our_output , their_output , atol=1e-3 )
    print('Do both models output the same tensors?' , '🔥' if success else '💩' )
    if not success:
        raise Exception('Something went wRoNg' )

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True , exist_ok=True )
    print(f'Saving model to {pytorch_dump_folder_path}' )
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
    args = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
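# Example invocation (the paths below are hypothetical, not part of the original
# script): a fairseq XLM-R XL checkpoint directory in, a transformers dump out.
#
#   python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
#       --roberta_checkpoint_path ./xlmr.xl \
#       --pytorch_dump_folder_path ./xlm-roberta-xl-converted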
| 101 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"的",
"价",
"格",
"是",
"15",
"便",
"alex",
"##andra",
",",
"。",
"-",
"t",
"shirt",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
"do_resize": True,
"size": {"height": 224, "width": 224},
"do_center_crop": True,
"crop_size": {"height": 18, "width": 18},
"do_normalize": True,
"image_mean": [0.4814_5466, 0.457_8275, 0.4082_1073],
"image_std": [0.2686_2954, 0.2613_0258, 0.2757_7711],
"do_convert_rgb": True,
}
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of small random PIL images for the processor tests."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 227 | 0 |
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"


# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
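# A quick illustration of what find_backend extracts (the values follow from
# the regexes above; this example is not part of the original file):
#
#   find_backend("    if not is_torch_available():")  -> "torch"
#   find_backend("x = 1")                             -> None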
def parse_init(init_file):
    """
    Read an init file and parse (per backend) the ``_import_structure`` objects defined and the ``TYPE_CHECKING``
    objects defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """
    Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init.
    """

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Check all inits in the transformers repo are built consistently."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Returns the list of Transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
]
def check_submodules():
    """Check all submodules are properly registered in the main init of Transformers."""
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
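# This script is meant to be run from the repository root (PATH_TO_TRANSFORMERS
# above is relative), e.g.:
#
#   python utils/check_inits.py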
| 173 |
from maths.prime_factors import prime_factors
def liouville_lambda(number: int) -> int:
    """
    The Liouville function lambda(n): 1 if n has an even number of prime factors
    (counted with multiplicity), -1 if it has an odd number.

    >>> liouville_lambda(10)
    1
    >>> liouville_lambda(11)
    -1
    >>> liouville_lambda(0)
    Traceback (most recent call last):
        ...
    ValueError: Input must be a positive integer
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
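# A small illustration of the first few values (these follow directly from the
# definition above; e.g. 8 = 2*2*2 has three prime factors, so lambda(8) = -1):
#
#   [liouville_lambda(n) for n in range(1, 11)]
#   -> [1, -1, -1, 1, -1, 1, -1, -1, 1, 1]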
| 173 | 1 |
'''simple docstring'''
from jiwer import compute_measures
import datasets
_CITATION = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
_DESCRIPTION = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""
_KWARGS_DESCRIPTION = """
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> wer = datasets.load_metric(\"wer\")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
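# Sanity check of the formula in the docstring, worked by hand (this example is
# not part of the original metric file): with reference "the cat sat" and
# prediction "the cat sat down" there are S=0 substitutions, D=0 deletions and
# I=1 insertion over N=3 reference words, so WER = (0 + 0 + 1) / 3 = 1/3.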
| 271 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 271 | 1 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/pix2struct-textcaps-base''': (
'''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'''
),
}
class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
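# Minimal usage sketch (not part of the original module): build a composite
# config from sub-configs and round-trip it through a dict.
#
#   text_cfg = Pix2StructTextConfig(num_layers=2)
#   vision_cfg = Pix2StructVisionConfig(num_hidden_layers=2)
#   cfg = Pix2StructConfig.from_text_vision_configs(text_cfg, vision_cfg)
#   assert cfg.to_dict()["text_config"]["num_layers"] == 2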
| 135 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''TsinghuaAI/CPM-Generate''': '''https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model''',
}
}
class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization for CPM models; the SentencePiece logic follows XLNetTokenizer."""

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs=None,
        **kwargs,
    ):
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")

    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text):
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
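# Usage sketch (not part of the original module; from_pretrained downloads the
# checkpoint named in PRETRAINED_VOCAB_FILES_MAP above):
#
#   tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
#   ids = tokenizer("你好,世界")["input_ids"]
#   text = tokenizer.decode(ids)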
| 135 | 1 |
"""simple docstring"""
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
"""artist""": """Zac Brown Band""",
"""genres""": """Country""",
"""lyrics""": """I met a traveller from an antique land,
Who said \"Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
}
@require_torch
    def test_1b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7_169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
    def test_5b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1_069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 115 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save
def save_len_file(
    tokenizer_name, data_dir, max_source_length=1_024, max_target_length=1_024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
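# Example CLI call via fire (the data_dir layout follows the legacy seq2seq
# examples; paths here are hypothetical):
#
#   python save_len_file.py --tokenizer_name t5-small --data_dir ./wmt_en_ro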
| 244 | 0 |
'''simple docstring'''
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
'text_branch': 'text_model',
'audio_branch': 'audio_model.audio_encoder',
'attn': 'attention.self',
'self.proj': 'output.dense',
'attention.self_mask': 'attn_mask',
'mlp.fc1': 'intermediate.dense',
'mlp.fc2': 'output.dense',
'norm1': 'layernorm_before',
'norm2': 'layernorm_after',
'bn0': 'batch_norm',
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg
def rename_state_dict(state_dict):
    model_state_dict = {}

    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
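# Example invocation (the checkpoint path is hypothetical):
#
#   python convert_clap_original_pytorch_to_hf.py \
#       --checkpoint_path ./CLAP/checkpoint.pt \
#       --pytorch_dump_folder_path ./clap-converted --enable_fusion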
| 366 |
'''simple docstring'''
def apply_table(inp, table):
    """Apply a 1-indexed permutation/selection table to a bit string."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Circular left shift of a bit string by one position."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise xor of two equal-length bit strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]  # swap the two halves
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]  # swap the two halves
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decypting is:", PT)
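# Worked micro-examples for the helpers above (the values follow directly from
# the function definitions; they are illustrations, not part of the original file):
#
#   left_shift("10110")               -> "01101"  (circular shift by one)
#   xor("1010", "0110")               -> "1100"   (bitwise xor of bit strings)
#   apply_table("1011", [4, 1, 2, 3]) -> "1101"   (1-indexed permutation)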
| 249 | 0 |
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C
def electric_conductivity(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
    """
    Solve sigma = n * e * mu for whichever quantity is passed in as 0; exactly
    one of conductivity, electron_conc and mobility must be 0.
    """
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif conductivity < 0:
raise ValueError("""Conductivity cannot be negative""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative""" )
elif mobility < 0:
raise ValueError("""mobility cannot be negative""" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
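# Worked example (the numbers are chosen for illustration): with conductivity
# unknown (passed as 0), electron_conc = 25 and mobility = 100,
#   sigma = n * e * mu = 25 * 100 * 1.6021e-19 = 4.00525e-16
# so electric_conductivity(0, 25, 100) returns ("conductivity", 4.00525e-16).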
| 305 |
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2


class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
| 305 | 1 |
'''simple docstring'''
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_lowercase : Optional[Any] = """\
@inproceedings{lin-2004-rouge,
title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",
author = \"Lin, Chin-Yew\",
booktitle = \"Text Summarization Branches Out\",
month = jul,
year = \"2004\",
address = \"Barcelona, Spain\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W04-1013\",
pages = \"74--81\",
}
"""
_lowercase : int = """\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metrics is a wrapper around Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
"""
_lowercase : Dict = """
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,
`\"rougeL\"`: Longest common subsequence based scoring.
`\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric('rouge')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
>>> print(results[\"rouge1\"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results[\"rouge1\"].mid.fmeasure)
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Rouge(datasets.Metric):
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/google-research/google-research/tree/master/rouge'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/ROUGE_(metric)''',
'''https://github.com/google-research/google-research/tree/master/rouge''',
] , )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        """simple docstring"""
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
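

if __name__ == "__main__":
    # Illustrative demo, not part of the original metric file: load the metric through
    # the public `datasets.load_metric` entry point and score one exact match, which
    # should reproduce the f-measure of 1.0 shown in the docstring above.
    rouge = datasets.load_metric("rouge")
    results = rouge.compute(predictions=["hello there"], references=["hello there"])
    print(results["rouge1"].mid.fmeasure)  # 1.0 for an exact match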
| 371 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""gpt2""": 1024,
"""gpt2-medium""": 1024,
"""gpt2-large""": 1024,
"""gpt2-xl""": 1024,
"""distilgpt2""": 1024,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        self.add_bos_token = kwargs.pop("add_bos_token", False)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        # Truncate from the left so the most recent turns are kept.
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
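

if __name__ == "__main__":
    # Illustrative usage sketch, not part of the original module (and only runnable
    # against an installed transformers, since this file uses relative imports):
    # round-trip a sentence through the fast tokenizer loaded from the standard
    # "gpt2" hub checkpoint. The expected ids are stated as an assumption.
    from transformers import GPT2TokenizerFast as _Tok

    tok = _Tok.from_pretrained("gpt2")
    ids = tok("hello world")["input_ids"]
    print(ids, tok.decode(ids))  # expected: [31373, 995] 'hello world'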
| 91 | 0 |
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type="divided_space_time",
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass

        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions

                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)


def prepare_video():
    video_file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(video_file)
    return list(video)


@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 173 |
"""simple docstring"""
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="""session""" )
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: List[Any] =10
SCREAMING_SNAKE_CASE_: Dict =datasets.Features(
{
"""tokens""": datasets.Sequence(datasets.Value("""string""" ) ),
"""labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ),
"""answers""": datasets.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
"""id""": datasets.Value("""int64""" ),
} )
SCREAMING_SNAKE_CASE_: Tuple =datasets.Dataset.from_dict(
{
"""tokens""": [["""foo"""] * 5] * n,
"""labels""": [[1] * 5] * n,
"""answers""": [{"""answer_start""": [97], """text""": ["""1976"""]}] * 10,
"""id""": list(range(lowercase ) ),
} , features=lowercase , )
return dataset
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" )
dataset.map(cache_file_name=lowercase )
return filename
# FILE_CONTENT + files
_UpperCAmelCase = """\
Text data.
Second line of data."""
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: int =tmp_path_factory.mktemp("""data""" ) / """file.txt"""
SCREAMING_SNAKE_CASE_: str =FILE_CONTENT
with open(lowercase , """w""" ) as f:
f.write(lowercase )
return filename
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
    import bz2
SCREAMING_SNAKE_CASE_: List[str] =tmp_path_factory.mktemp("""data""" ) / """file.txt.bz2"""
SCREAMING_SNAKE_CASE_: Union[str, Any] =bytes(lowercase , """utf-8""" )
    with bz2.open(lowercase , """wb""" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
import gzip
SCREAMING_SNAKE_CASE_: List[str] =str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" )
SCREAMING_SNAKE_CASE_: Dict =bytes(lowercase , """utf-8""" )
with gzip.open(lowercase , """wb""" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
SCREAMING_SNAKE_CASE_: Tuple =tmp_path_factory.mktemp("""data""" ) / """file.txt.lz4"""
SCREAMING_SNAKE_CASE_: List[Any] =bytes(lowercase , """utf-8""" )
        with lz4.frame.open(lowercase , """wb""" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase ):
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
SCREAMING_SNAKE_CASE_: Tuple =tmp_path_factory.mktemp("""data""" ) / """file.txt.7z"""
        with py7zr.SevenZipFile(lowercase , """w""" ) as archive:
archive.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase ):
import tarfile
SCREAMING_SNAKE_CASE_: List[Any] =tmp_path_factory.mktemp("""data""" ) / """file.txt.tar"""
with tarfile.TarFile(lowercase , """w""" ) as f:
f.add(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
import lzma
SCREAMING_SNAKE_CASE_: List[str] =tmp_path_factory.mktemp("""data""" ) / """file.txt.xz"""
SCREAMING_SNAKE_CASE_: List[Any] =bytes(lowercase , """utf-8""" )
with lzma.open(lowercase , """wb""" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase ):
import zipfile
SCREAMING_SNAKE_CASE_: str =tmp_path_factory.mktemp("""data""" ) / """file.txt.zip"""
with zipfile.ZipFile(lowercase , """w""" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
SCREAMING_SNAKE_CASE_: Tuple =tmp_path_factory.mktemp("""data""" ) / """file.txt.zst"""
SCREAMING_SNAKE_CASE_: Dict =bytes(lowercase , """utf-8""" )
with zstd.open(lowercase , """wb""" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Dict =tmp_path_factory.mktemp("""data""" ) / """file.xml"""
SCREAMING_SNAKE_CASE_: Union[str, Any] =textwrap.dedent(
"""\
<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<tmx version=\"1.4\">
<header segtype=\"sentence\" srclang=\"ca\" />
<body>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>""" )
with open(lowercase , """w""" ) as f:
f.write(lowercase )
return filename
DATA = [
{"""col_1""": """0""", """col_2""": 0, """col_3""": 0.0},
{"""col_1""": """1""", """col_2""": 1, """col_3""": 1.0},
{"""col_1""": """2""", """col_2""": 2, """col_3""": 2.0},
{"""col_1""": """3""", """col_2""": 3, """col_3""": 3.0},
]
DATA2 = [
{"""col_1""": """4""", """col_2""": 4, """col_3""": 4.0},
{"""col_1""": """5""", """col_2""": 5, """col_3""": 5.0},
]
DATA_DICT_OF_LISTS = {
"""col_1""": ["""0""", """1""", """2""", """3"""],
"""col_2""": [0, 1, 2, 3],
"""col_3""": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{"""col_3""": 0.0, """col_1""": """0""", """col_2""": 0},
{"""col_3""": 1.0, """col_1""": """1""", """col_2""": 1},
]
DATA_STR = [
{"""col_1""": """s0""", """col_2""": 0, """col_3""": 0.0},
{"""col_1""": """s1""", """col_2""": 1, """col_3""": 1.0},
{"""col_1""": """s2""", """col_2""": 2, """col_3""": 2.0},
{"""col_1""": """s3""", """col_2""": 3, """col_3""": 3.0},
]
@pytest.fixture(scope="""session""" )
def __magic_name__ ( ):
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Tuple =datasets.Dataset.from_dict(lowercase )
SCREAMING_SNAKE_CASE_: Optional[Any] =str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" )
dataset.map(cache_file_name=lowercase )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Tuple =str(tmp_path_factory.mktemp("""data""" ) / """dataset.sqlite""" )
    with contextlib.closing(sqlite3.connect(lowercase ) ) as con:
SCREAMING_SNAKE_CASE_: int =con.cursor()
cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""" )
for item in DATA:
cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)""" , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Tuple =str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" )
with open(lowercase , """w""" , newline="""""" ) as f:
SCREAMING_SNAKE_CASE_: int =csv.DictWriter(lowercase , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: List[str] =str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" )
with open(lowercase , """w""" , newline="""""" ) as f:
SCREAMING_SNAKE_CASE_: Tuple =csv.DictWriter(lowercase , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase ):
    import bz2
SCREAMING_SNAKE_CASE_: Optional[Any] =tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2"""
with open(lowercase , """rb""" ) as f:
SCREAMING_SNAKE_CASE_: Optional[int] =f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(lowercase , """wb""" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: int =tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(lowercase , """w""" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
f.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Any =tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(lowercase , """w""" ) as f:
f.write(lowercase , arcname=os.path.basename(csv_path.replace(""".csv""" , """.CSV""" ) ) )
f.write(lowercase , arcname=os.path.basename(csva_path.replace(""".csv""" , """.CSV""" ) ) )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Optional[Any] =tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip"""
with zipfile.ZipFile(lowercase , """w""" ) as f:
f.write(lowercase , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase ) ) )
f.write(lowercase , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: str =str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" )
SCREAMING_SNAKE_CASE_: List[Any] =pa.schema(
{
"""col_1""": pa.string(),
"""col_2""": pa.intaa(),
"""col_3""": pa.floataa(),
} )
with open(lowercase , """wb""" ) as f:
SCREAMING_SNAKE_CASE_: int =pq.ParquetWriter(lowercase , schema=lowercase )
SCREAMING_SNAKE_CASE_: str =pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowercase ) )] for k in DATA[0]} , schema=lowercase )
writer.write_table(lowercase )
writer.close()
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Optional[int] =str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
SCREAMING_SNAKE_CASE_: Optional[Any] ={"""data""": DATA}
with open(lowercase , """w""" ) as f:
json.dump(lowercase , lowercase )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
SCREAMING_SNAKE_CASE_: Tuple ={"""data""": DATA_DICT_OF_LISTS}
with open(lowercase , """w""" ) as f:
json.dump(lowercase , lowercase )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Any =str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" )
with open(lowercase , """w""" ) as f:
for item in DATA:
f.write(json.dumps(lowercase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Optional[Any] =str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" )
with open(lowercase , """w""" ) as f:
for item in DATA:
f.write(json.dumps(lowercase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Optional[int] =str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" )
with open(lowercase , """w""" ) as f:
for item in DATA_312:
f.write(json.dumps(lowercase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Tuple =str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" )
with open(lowercase , """w""" ) as f:
for item in DATA_STR:
f.write(json.dumps(lowercase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase ):
import gzip
SCREAMING_SNAKE_CASE_: Any =str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" )
with open(lowercase , """rb""" ) as orig_file:
with gzip.open(lowercase , """wb""" ) as zipped_file:
zipped_file.writelines(lowercase )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase ):
import gzip
SCREAMING_SNAKE_CASE_: int =str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" )
with open(lowercase , """rb""" ) as orig_file:
with gzip.open(lowercase , """wb""" ) as zipped_file:
zipped_file.writelines(lowercase )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip"""
with zipfile.ZipFile(lowercase , """w""" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
f.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Dict =tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip"""
with zipfile.ZipFile(lowercase , """w""" ) as f:
f.write(lowercase , arcname=os.path.join("""nested""" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Optional[int] =tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip"""
with zipfile.ZipFile(lowercase , """w""" ) as f:
f.write(lowercase , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase ) ) )
f.write(lowercase , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Dict =tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar"""
with tarfile.TarFile(lowercase , """w""" ) as f:
f.add(lowercase , arcname=os.path.basename(lowercase ) )
f.add(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: int =tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar"""
with tarfile.TarFile(lowercase , """w""" ) as f:
f.add(lowercase , arcname=os.path.join("""nested""" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =["""0""", """1""", """2""", """3"""]
SCREAMING_SNAKE_CASE_: Optional[Any] =str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" )
with open(lowercase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Dict =["""0""", """1""", """2""", """3"""]
SCREAMING_SNAKE_CASE_: Dict =str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" )
with open(lowercase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Optional[Any] =["""0""", """1""", """2""", """3"""]
SCREAMING_SNAKE_CASE_: List[str] =tmp_path_factory.mktemp("""data""" ) / """dataset.abc"""
with open(lowercase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Union[str, Any] =tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip"""
with zipfile.ZipFile(lowercase , """w""" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
f.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: int =tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.text.zip"""
with zipfile.ZipFile(lowercase , """w""" ) as f:
f.write(lowercase , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase ) ) )
f.write(lowercase , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Union[str, Any] =tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip"""
with zipfile.ZipFile(lowercase , """w""" ) as f:
f.write(lowercase , arcname=os.path.basename("""unsupported.ext""" ) )
f.write(lowercase , arcname=os.path.basename("""unsupported_2.ext""" ) )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: List[str] ="""\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] )
SCREAMING_SNAKE_CASE_: List[Any] =str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" )
with open(lowercase , """w""" , encoding="""utf-8""" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( ):
return os.path.join("""tests""" , """features""" , """data""" , """test_image_rgb.jpg""" )
@pytest.fixture(scope="""session""" )
def __magic_name__ ( ):
return os.path.join("""tests""" , """features""" , """data""" , """test_audio_44100.wav""" )
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Any =tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip"""
with zipfile.ZipFile(lowercase , """w""" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
f.write(lowercase , arcname=os.path.basename(lowercase ).replace(""".jpg""" , """2.jpg""" ) )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: List[str] =tmp_path_factory.mktemp("""data_dir""" )
(data_dir / "subdir").mkdir()
with open(data_dir / """subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 10 )
with open(data_dir / """subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 10 )
# hidden file
with open(data_dir / """subdir""" / """.test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / """.subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 10 )
with open(data_dir / """.subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 10 )
return data_dir
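

# Illustrative consumer (an assumption, not part of this conftest): pytest injects a
# session-scoped fixture into any test that names it as a parameter, so a fixture
# returning the CSV path above would be used like this:
#
#     def test_csv_has_expected_header(csv_path):
#         with open(csv_path, newline="") as f:
#             assert f.readline().strip() == "col_1,col_2,col_3"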
| 173 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 1_024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 33 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))
def init_hf_modules():
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module(name):
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file):
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
def get_relative_import_files(module_file):
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports
def check_imports(filename):
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)
def get_class_in_module(class_name, module_path):
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)
def find_pipeline_class(loaded_module):
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class
def get_cached_module_file(
    pretrained_model_name_or_path,
    module_file,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)

    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(
    pretrained_model_name_or_path,
    module_file,
    class_name=None,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )

    return get_class_in_module(class_name, final_module.replace(".py", ""))
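

if __name__ == "__main__":
    # Illustrative usage sketch, not part of the original module (run it against an
    # installed diffusers, since this file uses relative imports): resolve a community
    # pipeline class from the GitHub examples/community folder. The pipeline name is
    # an assumption picked for illustration; any file in that folder should work.
    from diffusers.utils.dynamic_modules_utils import get_class_from_dynamic_module as resolve

    cls = resolve("clip_guided_stable_diffusion", module_file="clip_guided_stable_diffusion.py")
    print(cls.__name__)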
| 33 | 1 |
"""simple docstring"""
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score  # noqa: F401 # Here to have a nice missing dependency error message early on
import datasets
_CITATION = '''\
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
'''
_DESCRIPTION = '''\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
'''
_KWARGS_DESCRIPTION = '''
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
predictions: list of predictions to score (as int64),
except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).
references: list of ground truth labels corresponding to the predictions (as int64),
except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"precision": Precision@10
Examples:
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')
>>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'precision@10\': 1.0}
'''
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(in_sentvecs, en_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class IndicGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" )
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32" ) ),
"references": datasets.Value("int64" )
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32" ) ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" if self.config_name != "cvit-mkb-clsr" else None , )
    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(UpperCAmelCase , UpperCAmelCase )}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]" )
| 135 |
"""simple docstring"""
__A = [0, 2, 4, 6, 8]
__A = [1, 3, 5, 7, 9]
def reversible_numbers(
    remaining_length: int, remainder: int, digits: list[int], length: int
) -> int:
    """Count reversible numbers of the given length by filling digit pairs
    from the outside in, carrying the remainder of each pairwise sum."""
    if remaining_length == 0:
        # Leading zeros are not allowed on either the number or its reversal.
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0

            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length
            )
        return result

    result = 0
    for digita in range(10):
        digits[(length + remaining_length) // 2 - 1] = digita

        if (remainder + digita) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digitb in other_parity_digits:
            digits[(length - remaining_length) // 2] = digitb
            result += reversible_numbers(
                remaining_length - 2,
                (remainder + digita + digitb) // 10,
                digits,
                length,
            )
    return result


def solution(max_power: int = 9) -> int:
    """Sum the reversible-number counts for every length up to ``max_power``."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
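# Quick check (comment only): Project Euler 145 states there are 120 reversible
# numbers below one thousand, so solution(3) should return 120.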
if __name__ == "__main__":
print(F"""{solution() = }""")
| 135 | 1 |
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}

CONFIG_MAP = {
"""b0""": {
"""hidden_dim""": 1_2_8_0,
"""width_coef""": 1.0,
"""depth_coef""": 1.0,
"""image_size""": 2_2_4,
"""dropout_rate""": 0.2,
"""dw_padding""": [],
},
"""b1""": {
"""hidden_dim""": 1_2_8_0,
"""width_coef""": 1.0,
"""depth_coef""": 1.1,
"""image_size""": 2_4_0,
"""dropout_rate""": 0.2,
"""dw_padding""": [1_6],
},
"""b2""": {
"""hidden_dim""": 1_4_0_8,
"""width_coef""": 1.1,
"""depth_coef""": 1.2,
"""image_size""": 2_6_0,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 8, 1_6],
},
"""b3""": {
"""hidden_dim""": 1_5_3_6,
"""width_coef""": 1.2,
"""depth_coef""": 1.4,
"""image_size""": 3_0_0,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 1_8],
},
"""b4""": {
"""hidden_dim""": 1_7_9_2,
"""width_coef""": 1.4,
"""depth_coef""": 1.8,
"""image_size""": 3_8_0,
"""dropout_rate""": 0.4,
"""dw_padding""": [6],
},
"""b5""": {
"""hidden_dim""": 2_0_4_8,
"""width_coef""": 1.6,
"""depth_coef""": 2.2,
"""image_size""": 4_5_6,
"""dropout_rate""": 0.4,
"""dw_padding""": [1_3, 2_7],
},
"""b6""": {
"""hidden_dim""": 2_3_0_4,
"""width_coef""": 1.8,
"""depth_coef""": 2.6,
"""image_size""": 5_2_8,
"""dropout_rate""": 0.5,
"""dw_padding""": [3_1],
},
"""b7""": {
"""hidden_dim""": 2_5_6_0,
"""width_coef""": 2.0,
"""depth_coef""": 3.1,
"""image_size""": 6_0_0,
"""dropout_rate""": 0.5,
"""dw_padding""": [1_8],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
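# Illustrative sanity check, with values read off CONFIG_MAP above:
#   config = get_efficientnet_config("b0")
#   assert config.image_size == 224 and config.hidden_dim == 1280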
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor


def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(('stem_conv/kernel:0', 'embeddings.convolution.weight') )
rename_keys.append(('stem_bn/gamma:0', 'embeddings.batchnorm.weight') )
rename_keys.append(('stem_bn/beta:0', 'embeddings.batchnorm.bias') )
rename_keys.append(('stem_bn/moving_mean:0', 'embeddings.batchnorm.running_mean') )
rename_keys.append(('stem_bn/moving_variance:0', 'embeddings.batchnorm.running_var') )
for b in block_names:
a__ : Tuple = block_name_mapping[b]
rename_keys.append((F'block{b}_expand_conv/kernel:0', F'encoder.blocks.{hf_b}.expansion.expand_conv.weight') )
rename_keys.append((F'block{b}_expand_bn/gamma:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.weight') )
rename_keys.append((F'block{b}_expand_bn/beta:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.bias') )
rename_keys.append(
(F'block{b}_expand_bn/moving_mean:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.running_mean') )
rename_keys.append(
(F'block{b}_expand_bn/moving_variance:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.running_var') )
rename_keys.append(
(F'block{b}_dwconv/depthwise_kernel:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight') )
rename_keys.append((F'block{b}_bn/gamma:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight') )
rename_keys.append((F'block{b}_bn/beta:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias') )
rename_keys.append(
(F'block{b}_bn/moving_mean:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean') )
rename_keys.append(
(F'block{b}_bn/moving_variance:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var') )
rename_keys.append((F'block{b}_se_reduce/kernel:0', F'encoder.blocks.{hf_b}.squeeze_excite.reduce.weight') )
rename_keys.append((F'block{b}_se_reduce/bias:0', F'encoder.blocks.{hf_b}.squeeze_excite.reduce.bias') )
rename_keys.append((F'block{b}_se_expand/kernel:0', F'encoder.blocks.{hf_b}.squeeze_excite.expand.weight') )
rename_keys.append((F'block{b}_se_expand/bias:0', F'encoder.blocks.{hf_b}.squeeze_excite.expand.bias') )
rename_keys.append(
(F'block{b}_project_conv/kernel:0', F'encoder.blocks.{hf_b}.projection.project_conv.weight') )
rename_keys.append((F'block{b}_project_bn/gamma:0', F'encoder.blocks.{hf_b}.projection.project_bn.weight') )
rename_keys.append((F'block{b}_project_bn/beta:0', F'encoder.blocks.{hf_b}.projection.project_bn.bias') )
rename_keys.append(
(F'block{b}_project_bn/moving_mean:0', F'encoder.blocks.{hf_b}.projection.project_bn.running_mean') )
rename_keys.append(
(F'block{b}_project_bn/moving_variance:0', F'encoder.blocks.{hf_b}.projection.project_bn.running_var') )
rename_keys.append(('top_conv/kernel:0', 'encoder.top_conv.weight') )
rename_keys.append(('top_bn/gamma:0', 'encoder.top_bn.weight') )
rename_keys.append(('top_bn/beta:0', 'encoder.top_bn.bias') )
rename_keys.append(('top_bn/moving_mean:0', 'encoder.top_bn.running_mean') )
rename_keys.append(('top_bn/moving_variance:0', 'encoder.top_bn.running_var') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            # TF conv kernels are (h, w, in, out); PyTorch expects (out, in, h, w).
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            # TF depthwise kernels are (h, w, channels, multiplier).
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
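# Example invocation (comment only; the script filename and output folder are
# placeholders, the flags mirror the argparse setup above):
#   python convert_efficientnet_to_pytorch.py --model_name b0 \
#       --pytorch_dump_folder_path hf_model --save_model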
| 225 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."})
    labels: Optional[str] = field(
        default=None, metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome.")

    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}")

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]
        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        return preds_list, out_label_list
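    # Illustrative example (comment only, made-up label names): for a label_ids
    # row [0, -100, 1] and an argmax row [0, 0, 1], the -100 position (the
    # CrossEntropyLoss ignore_index used for sub-word pieces) is dropped from
    # both lists, leaving two aligned label sequences of length 2.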
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)

    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )

        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)

        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
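# Hypothetical invocation (comment only; flags correspond to the dataclasses
# above and to TrainingArguments, the paths are placeholders):
#   python run_ner.py --model_name_or_path bert-base-cased --data_dir ./conll2003 \
#       --labels ./labels.txt --output_dir ./ner-out --do_train --do_eval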
| 225 | 1 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
'https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda r: version.Version(r))


def init_hf_modules():
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def create_dynamic_module(name):
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def get_relative_imports(module_file):
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
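# Behaviour sketch (comment only): for a module containing the lines
# "import .utils" and "from .schedulers import DDIMScheduler", this returns
# ["utils", "schedulers"] (duplicates removed, order not guaranteed).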
def get_relative_import_files(module_file):
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports


def check_imports(filename):
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`")

    return get_relative_imports(filename)


def get_class_in_module(class_name, module_path):
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)


def find_pipeline_class(loaded_module):
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}.")

            pipeline_class = cls

    return pipeline_class
def get_cached_module_file(
    pretrained_model_name_or_path,
    module_file,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
):
    # Download and cache `module_file` from the repo, or grab it if it is a local file.
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)

    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}.")

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(
    pretrained_model_name_or_path,
    module_file,
    class_name=None,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
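# Hedged usage sketch (the community pipeline name is illustrative; any
# community pipeline file from the diffusers examples works the same way):
#   pipeline_class = get_class_from_dynamic_module(
#       "lpw_stable_diffusion", module_file="lpw_stable_diffusion.py"
#   )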
| 76 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
| 249 | 0 |
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
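# Hedged usage sketch (this file follows the torch.hub hubconf convention; the
# hub repo path below is illustrative and must point at a repo that ships it):
#   import torch
#   model = torch.hub.load("huggingface/pytorch-transformers", "model", "bert-base-uncased")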
| 39 |
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS: str = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS: list[int] = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS: set[int] = {ord(char) for char in VALID_CHARS}
COMMON_WORDS: list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")

    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(F"{solution() = }")
| 39 | 1 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation('gelu')
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation('gelu')
        gelu10 = get_activation('gelu_10')

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation('gelu')
        get_activation('gelu_10')
        get_activation('gelu_fast')
        get_activation('gelu_new')
        get_activation('gelu_python')
        get_activation('gelu_pytorch_tanh')
        get_activation('linear')
        get_activation('mish')
        get_activation('quick_gelu')
        get_activation('relu')
        get_activation('sigmoid')
        get_activation('silu')
        get_activation('swish')
        get_activation('tanh')
        with self.assertRaises(KeyError):
            get_activation('bogus')
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation('gelu')
        act1.a = 1
        act2 = get_activation('gelu')
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
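# Note (an inference from the test above, not from the activations source):
# 'gelu_10' behaves like the standard GELU but clips its output at 10.0 on the
# high end, roughly torch.clamp(gelu(x), max=10.0).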
| 47 |
"""simple docstring"""
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))


class LevitModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return LevitConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            patch_size=self.patch_size,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            depths=self.depths,
            key_dim=self.key_dim,
            drop_path_rate=self.drop_path_rate,
            mlp_ratio=self.mlp_ratio,
            attention_ratio=self.attention_ratio,
            initializer_range=self.initializer_range,
            down_ops=self.down_ops,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
__UpperCamelCase = (
{
"feature-extraction": LevitModel,
"image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = LevitModelTester(self)
SCREAMING_SNAKE_CASE_ : List[Any] = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37)
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
return
@unittest.skip(reason='''Levit does not use inputs_embeds''')
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
pass
@unittest.skip(reason='''Levit does not support input and output embeddings''')
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
pass
@unittest.skip(reason='''Levit does not output attentions''')
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Any = model_class(lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ : Dict = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
def check_hidden_states_output(lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : str):
SCREAMING_SNAKE_CASE_ : str = model_class(lowercase_)
model.to(lowercase_)
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Tuple = model(**self._prepare_for_class(lowercase_ , lowercase_))
SCREAMING_SNAKE_CASE_ : str = outputs.hidden_states
SCREAMING_SNAKE_CASE_ : Optional[int] = len(self.model_tester.depths) + 1
self.assertEqual(len(lowercase_) , lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = (self.model_tester.image_size, self.model_tester.image_size)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_size[0], image_size[1]
for _ in range(4):
SCREAMING_SNAKE_CASE_ : Optional[Any] = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1)
SCREAMING_SNAKE_CASE_ : Optional[int] = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1)
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Optional[int] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE_ : Tuple = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_)
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : Tuple=False):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = super()._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_)
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_)
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(lowercase_)
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model_class(lowercase_)
model.to(lowercase_)
model.train()
SCREAMING_SNAKE_CASE_ : Optional[Any] = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = model(**lowercase_).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()

            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]
                    model = model_class(config)
                    model.to(torch_device)
                    model.train()
                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])
                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong with the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
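# Note (added): the expected slice above covers the first three of the 1000 ImageNet
# logits for the standard COCO cats fixture image; atol=1e-4 absorbs minor numeric
# drift across hardware and PyTorch versions.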
| 91 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'studio-ousia/luke-base': 'https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json',
'studio-ousia/luke-large': 'https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json',
}
class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
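# Hedged usage sketch (added; not part of the original module). The config is normally
# reached through the transformers package:
#
#     config = LukeConfig()                      # defaults above: 12 layers, hidden size 768
#     config = LukeConfig(entity_emb_size=128)   # any field can be overridden via kwargs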
| 352 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
    from ..image_utils import load_image

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING

logger = logging.get_logger(__name__)


Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )

    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]

            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        """Turn a (xmin, ymin, xmax, ymax) tensor into a dict of integer pixel coordinates."""
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
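# Hedged usage sketch (added; the checkpoint name is an example, not part of this module):
#
#     from transformers import pipeline
#
#     detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#     detector("http://images.cocodataset.org/val2017/000000039769.jpg")
#     # -> [{"score": 0.99, "label": "cat", "box": {"xmin": ..., "ymin": ..., ...}}, ...]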
| 143 | 0 |
"""simple docstring"""
def solution(limit: int = 1_000_000) -> int:
    """Sum Euler's totient phi(n) for 2 <= n <= limit, via a sieve plus the product formula."""
    # Sieve of Eratosthenes over the odd numbers, seeded with 2.
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    # Euler's product formula: phi(n) = n * prod(1 - 1/p) over the distinct primes p | n.
    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"{solution() = }")
| 33 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_mobilenet_v2": [
        "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileNetV2Config",
        "MobileNetV2OnnxConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
        "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileNetV2ForImageClassification",
        "MobileNetV2ForSemanticSegmentation",
        "MobileNetV2Model",
        "MobileNetV2PreTrainedModel",
        "load_tf_weights_in_mobilenet_v2",
    ]

if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
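# Design note (added): at import time this module is replaced by a _LazyModule, so the
# heavy torch/vision imports only run when one of the listed names is first accessed;
# the TYPE_CHECKING branch exists purely so static analyzers still see the real symbols.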
| 33 | 1 |
'''simple docstring'''
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    """Find the unique prime factors of an integer."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of unique prime factors."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check whether all elements of a list are equal (or the list is empty)."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first n consecutive integers that each have n distinct prime factors."""
    base = 2

    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    """Return the first member of the first group found by run(n)."""
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
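# Worked examples (added): solution(2) == 14, since 14 = 2 * 7 and 15 = 3 * 5 are the
# first two consecutive integers with two distinct prime factors each, and
# solution(3) == 644 (644, 645, 646). This mirrors Project Euler problem 47.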
| 363 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
},
'''tokenizer_file''': {
'''google/bigbird-roberta-base''': (
'''https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'''
),
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/bigbird-roberta-base''': 4096,
'''google/bigbird-roberta-large''': 4096,
'''google/bigbird-base-trivia-itc''': 4096,
}
SPIECE_UNDERLINE = '▁'
class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
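# Hedged usage sketch (added; "google/bigbird-roberta-base" is one of the checkpoints in
# PRETRAINED_VOCAB_FILES_MAP above):
#
#     tokenizer = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
#     tokenizer("Hello world")  # input_ids include the [CLS] ... [SEP] special tokens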
| 142 | 0 |
def fizz_buzz(number: int, iterations: int) -> str:
    """Play FizzBuzz starting from `number`, for rounds up to and including `iterations`."""
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)

        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
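    # Quick demonstration (added for illustration): the classic first fifteen terms.
    print(fizz_buzz(1, 15))
    # -> '1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz '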
| 225 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'squeezebert/squeezebert-uncased': 512,
'squeezebert/squeezebert-mnli': 512,
'squeezebert/squeezebert-mnli-headless': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Re-synchronise the Rust backend's normalizer with the Python-side arguments.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
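# Hedged usage sketch (added; the checkpoint name is taken from the map above):
#
#     tokenizer = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
#     tokenizer("Hello world", return_tensors="pt")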
| 225 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 283 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.roformer.modeling_flax_roformer import (
        FlaxRoFormerForMaskedLM,
        FlaxRoFormerForMultipleChoice,
        FlaxRoFormerForQuestionAnswering,
        FlaxRoFormerForSequenceClassification,
        FlaxRoFormerForTokenClassification,
        FlaxRoFormerModel,
    )


class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000

        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )

        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 283 | 1 |
def decimal_to_binary(num: int) -> str:
    """Convert an integer to its binary string representation (with a 0b prefix)."""
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False
    if num < 0:
        negative = True
        num = -num

    binary = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
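    # Quick demonstration (added for illustration):
    print(decimal_to_binary(10))   # 0b1010
    print(decimal_to_binary(-10))  # -0b1010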
| 39 |
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factors of n, smallest first (with multiplicity)."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
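    # Quick demonstration (added for illustration): 360 = 2**3 * 3**2 * 5
    print(prime_factors(360))  # [2, 2, 2, 3, 3, 5]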
| 39 | 1 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    """Combines a SpeechT5 feature extractor and a SpeechT5 tokenizer into a single processor."""

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # Temporarily switch the feature extractor to mel-spectrogram size for target padding.
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
"""simple docstring"""
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__A = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :Dict = DebertaVaTokenizer
_UpperCAmelCase :Tuple = DebertaVaTokenizerFast
_UpperCAmelCase :int = True
_UpperCAmelCase :int = True
def _snake_case ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__: List[Any] = DebertaVaTokenizer(_UpperCAmelCase , unk_token='''<unk>''' )
tokenizer.save_pretrained(self.tmpdirname )
def _snake_case ( self , _UpperCAmelCase ):
lowercase__: List[str] = '''this is a test'''
lowercase__: int = '''this is a test'''
return input_text, output_text
def _snake_case ( self ):
lowercase__: Optional[int] = '''<pad>'''
lowercase__: Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''[PAD]''' )
self.assertEqual(len(_UpperCAmelCase ) , 30001 )
def _snake_case ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def _snake_case ( self ):
# fmt: off
lowercase__: int = ''' \tHeLLo!how \n Are yoU? '''
lowercase__: List[str] = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?''']
# fmt: on
lowercase__: Any = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase )
lowercase__: Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[Any] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase )
lowercase__: Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def _snake_case ( self ):
pass
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def _snake_case ( self ):
pass
def _snake_case ( self ):
# fmt: off
lowercase__: Dict = '''I was born in 92000, and this is falsé.'''
lowercase__: str = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase__: Union[str, Any] = DebertaVaTokenizer(_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: str = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Tuple = DebertaVaTokenizerFast(_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
# fmt: off
lowercase__: Any = '''I was born in 92000, and this is falsé.'''
lowercase__: str = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase__: Optional[int] = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: List[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[int] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
# fmt: off
lowercase__: List[str] = '''I was born in 92000, and this is falsé.'''
lowercase__: List[str] = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
lowercase__: Union[str, Any] = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: int = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
# fmt: off
lowercase__: Union[str, Any] = '''I was born in 92000, and this is falsé.'''
lowercase__: int = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase__: Optional[int] = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Union[str, Any] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
# fmt: off
lowercase__: Optional[int] = ''' \tHeLLo!how \n Are yoU? '''
lowercase__: str = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?''']
# fmt: on
lowercase__: Dict = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[Any] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
lowercase__: int = self.get_tokenizer()
lowercase__: List[Any] = self.get_rust_tokenizer()
lowercase__: List[str] = '''I was born in 92000, and this is falsé.'''
lowercase__: Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
lowercase__: List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Dict = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
lowercase__: Tuple = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Any = self.get_rust_tokenizer()
lowercase__: str = tokenizer.encode(_UpperCAmelCase )
lowercase__: Any = rust_tokenizer.encode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Optional[Any] = '''This is a test'''
lowercase__: str = [13, 1, 4398, 25, 21, 1289]
lowercase__: List[Any] = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test''']
lowercase__: Any = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test''']
lowercase__: int = DebertaVaTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
lowercase__: int = DebertaVaTokenizerFast(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
lowercase__: Any = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: str = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Any = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Union[str, Any] = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: List[Any] = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: str = rust_tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
# fmt: off
lowercase__: str = '''I was born in 92000, and this is falsé.'''
lowercase__: Dict = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
lowercase__: Tuple = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ]
lowercase__: Dict = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
lowercase__: Optional[Any] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Dict = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[Any] = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: List[Any] = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Dict = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[Any] = rust_tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Optional[int] = DebertaVaTokenizer(_UpperCAmelCase )
lowercase__: Optional[int] = tokenizer.encode('''sequence builders''' )
lowercase__: Optional[Any] = tokenizer.encode('''multi-sequence build''' )
lowercase__: Union[str, Any] = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase )
lowercase__: Dict = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , _UpperCAmelCase )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , _UpperCAmelCase , )
@slow
def _snake_case ( self ):
# fmt: off
lowercase__: List[Any] = {'''input_ids''': [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
| 2 | 1 |
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
with open(_SCREAMING_SNAKE_CASE , encoding="""utf-8""" ) as input_file:
UpperCAmelCase : List[str] = re.compile(r"""(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)""" )
UpperCAmelCase : str = input_file.read()
UpperCAmelCase : int = regexp.search(_SCREAMING_SNAKE_CASE )
return match
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Any:
'''simple docstring'''
with open(_SCREAMING_SNAKE_CASE , encoding="""utf-8""" ) as input_file:
UpperCAmelCase : Union[str, Any] = re.compile(r"""#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()""" , re.DOTALL )
UpperCAmelCase : Optional[Any] = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
UpperCAmelCase : int = regexp.finditer(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[int] = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
UpperCAmelCase : Dict = Path("""./datasets""" )
UpperCAmelCase : Optional[int] = list(dataset_paths.absolute().glob("""**/*.py""" ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(_SCREAMING_SNAKE_CASE ) ):
raise AssertionError(F"open(...) must use utf-8 encoding in {dataset}" )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Any = Path("""./datasets""" )
UpperCAmelCase : Optional[Any] = list(dataset_paths.absolute().glob("""**/*.py""" ) )
for dataset in dataset_files:
if self._no_print_statements(str(_SCREAMING_SNAKE_CASE ) ):
raise AssertionError(F"print statement found in {dataset}. Use datasets.logger/logging instead." )
| 109 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
    from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class __snake_case ( unittest.TestCase ):
def __a ( self , __UpperCamelCase ) -> Dict:
'''simple docstring'''
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
snake_case__ : Optional[int] = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(__UpperCamelCase )
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ : List[str] = 'sshleifer/tiny-gpt2'
snake_case__ : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
snake_case__ : Optional[Any] = PyTorchBenchmark(__UpperCamelCase )
snake_case__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ : List[Any] = 'sgugger/tiny-distilbert-classification'
snake_case__ : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , only_pretrain_model=__UpperCamelCase , )
snake_case__ : Dict = PyTorchBenchmark(__UpperCamelCase )
snake_case__ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Dict = 'sshleifer/tiny-gpt2'
snake_case__ : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , torchscript=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
snake_case__ : Optional[Any] = PyTorchBenchmark(__UpperCamelCase )
snake_case__ : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' )
def __a ( self ) -> int:
'''simple docstring'''
snake_case__ : Any = 'sshleifer/tiny-gpt2'
snake_case__ : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , fpaa=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
snake_case__ : Optional[int] = PyTorchBenchmark(__UpperCamelCase )
snake_case__ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __a ( self ) -> List[Any]:
'''simple docstring'''
snake_case__ : Union[str, Any] = 'sshleifer/tiny-gpt2'
snake_case__ : Optional[int] = AutoConfig.from_pretrained(__UpperCamelCase )
# set architectures equal to `None`
snake_case__ : Union[str, Any] = None
snake_case__ : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
snake_case__ : List[Any] = PyTorchBenchmark(__UpperCamelCase , configs=[config] )
snake_case__ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __a ( self ) -> Any:
'''simple docstring'''
snake_case__ : Optional[Any] = 'sshleifer/tiny-gpt2'
snake_case__ : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
snake_case__ : str = PyTorchBenchmark(__UpperCamelCase )
snake_case__ : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' )
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ : List[str] = 'sshleifer/tiny-gpt2'
snake_case__ : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=__UpperCamelCase , multi_process=__UpperCamelCase , )
snake_case__ : List[Any] = PyTorchBenchmark(__UpperCamelCase )
snake_case__ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __a ( self ) -> int:
'''simple docstring'''
snake_case__ : Union[str, Any] = 'sshleifer/tiny-gpt2'
snake_case__ : Optional[Any] = AutoConfig.from_pretrained(__UpperCamelCase )
snake_case__ : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
snake_case__ : List[str] = PyTorchBenchmark(__UpperCamelCase , configs=[config] )
snake_case__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ : List[str] = 'sshleifer/tinier_bart'
snake_case__ : List[str] = AutoConfig.from_pretrained(__UpperCamelCase )
snake_case__ : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
snake_case__ : int = PyTorchBenchmark(__UpperCamelCase , configs=[config] )
snake_case__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __a ( self ) -> int:
'''simple docstring'''
snake_case__ : Any = 'sshleifer/tiny-gpt2'
snake_case__ : Tuple = AutoConfig.from_pretrained(__UpperCamelCase )
snake_case__ : List[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
snake_case__ : int = PyTorchBenchmark(__UpperCamelCase , configs=[config] )
snake_case__ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ : Optional[int] = 'sshleifer/tinier_bart'
snake_case__ : Union[str, Any] = AutoConfig.from_pretrained(__UpperCamelCase )
snake_case__ : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
snake_case__ : Union[str, Any] = PyTorchBenchmark(__UpperCamelCase , configs=[config] )
snake_case__ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __a ( self ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Union[str, Any] = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , save_to_csv=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__UpperCamelCase , 'inf_time.csv' ) , train_memory_csv_file=os.path.join(__UpperCamelCase , 'train_mem.csv' ) , inference_memory_csv_file=os.path.join(__UpperCamelCase , 'inf_mem.csv' ) , train_time_csv_file=os.path.join(__UpperCamelCase , 'train_time.csv' ) , env_info_csv_file=os.path.join(__UpperCamelCase , 'env.csv' ) , multi_process=__UpperCamelCase , )
snake_case__ : Union[str, Any] = PyTorchBenchmark(__UpperCamelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(__UpperCamelCase , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCamelCase , 'train_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCamelCase , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCamelCase , 'train_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCamelCase , 'env.csv' ) ).exists() )
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ : Optional[int] = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(__UpperCamelCase ):
self.assertTrue(hasattr(__UpperCamelCase , 'sequential' ) )
self.assertTrue(hasattr(__UpperCamelCase , 'cumulative' ) )
self.assertTrue(hasattr(__UpperCamelCase , 'current' ) )
self.assertTrue(hasattr(__UpperCamelCase , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__UpperCamelCase , 'log.txt' ) , log_print=__UpperCamelCase , trace_memory_line_by_line=__UpperCamelCase , multi_process=__UpperCamelCase , )
snake_case__ : int = PyTorchBenchmark(__UpperCamelCase )
snake_case__ : Optional[Any] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(__UpperCamelCase , 'log.txt' ) ).exists() )
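# Hedged usage sketch (outside the test class above): the same benchmark machinery
# driven directly. The model id and sizes are illustrative; the result object carries
# the same dicts the assertions above inspect.
benchmark_args = PyTorchBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    inference=True,
    training=False,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
print(PyTorchBenchmark(benchmark_args).run().time_inference_result)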
| 143 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class A_ (unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_=13 , lowercase_=7 , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=99 , lowercase_=32 , lowercase_=5 , lowercase_=4 , lowercase_=37 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=512 , lowercase_=16 , lowercase_=2 , lowercase_=0.02 , lowercase_=4 , ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = parent
UpperCAmelCase_ : Tuple = batch_size
UpperCAmelCase_ : Any = seq_length
UpperCAmelCase_ : Union[str, Any] = is_training
UpperCAmelCase_ : str = use_attention_mask
UpperCAmelCase_ : List[Any] = use_token_type_ids
UpperCAmelCase_ : List[Any] = use_labels
UpperCAmelCase_ : Dict = vocab_size
UpperCAmelCase_ : int = hidden_size
UpperCAmelCase_ : List[Any] = num_hidden_layers
UpperCAmelCase_ : Tuple = num_attention_heads
UpperCAmelCase_ : List[str] = intermediate_size
UpperCAmelCase_ : str = hidden_act
UpperCAmelCase_ : int = hidden_dropout_prob
UpperCAmelCase_ : str = attention_probs_dropout_prob
UpperCAmelCase_ : Union[str, Any] = max_position_embeddings
UpperCAmelCase_ : Optional[int] = type_vocab_size
UpperCAmelCase_ : List[Any] = type_sequence_label_size
UpperCAmelCase_ : str = initializer_range
UpperCAmelCase_ : str = num_choices
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : List[str] = None
if self.use_attention_mask:
UpperCAmelCase_ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : List[str] = None
if self.use_token_type_ids:
UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ : List[Any] = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ : List[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ : int = True
UpperCAmelCase_ : Tuple = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class A_ (lowercase__ ,unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = True
SCREAMING_SNAKE_CASE__ : Dict = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : str = FlaxRobertaModelTester(self )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
UpperCAmelCase_ : str = model_class_name.from_pretrained("roberta-base" , from_pt=lowercase_ )
UpperCAmelCase_ : Union[str, Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowercase_ )
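# Minimal sketch mirroring the slow test above (assumes flax is installed and the
# roberta-base checkpoint, which ships Flax weights, is reachable).
flax_model = FlaxRobertaModel.from_pretrained("roberta-base")
print(flax_model(np.ones((1, 1), dtype="i4")).last_hidden_state.shape)  # (1, 1, 768)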
| 23 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_a = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
_a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
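# Background sketch of the pattern above: _LazyModule defers the heavy torch import
# until an attribute is first accessed. A minimal PEP 562 equivalent (the submodule
# layout below is the one assumed by _import_structure):
import importlib

_LAZY = {name: ".modeling_unispeech" for name in ("UniSpeechModel", "UniSpeechForCTC")}

def __getattr__(name):
    if name in _LAZY:
        return getattr(importlib.import_module(_LAZY[name], __name__), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")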
| 23 | 1 |
"""simple docstring"""
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
lowerCAmelCase__ : str = (
'https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'
)
lowerCAmelCase__ : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
def a_ ( ):
UpperCAmelCase__ = 'https://pypi.org/pypi/diffusers/json'
UpperCAmelCase__ = json.loads(request.urlopen(lowerCamelCase ).read() )['releases'].keys()
return sorted(lowerCamelCase , key=lambda lowerCamelCase : version.Version(lowerCamelCase ) )
def a_ ( ):
# This function has already been executed if HF_MODULES_CACHE already is in the Python path.
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(lowerCamelCase )
os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase )
UpperCAmelCase__ = Path(lowerCamelCase ) / '__init__.py'
if not init_path.exists():
init_path.touch()
def a_ ( lowerCamelCase ):
init_hf_modules()
UpperCAmelCase__ = Path(lowerCamelCase ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase )
UpperCAmelCase__ = dynamic_module_path / '__init__.py'
if not init_path.exists():
init_path.touch()
def a_ ( lowerCamelCase ):
with open(lowerCamelCase , 'r' , encoding='utf-8' ) as f:
UpperCAmelCase__ = f.read()
# Imports of the form `import .xxx`
UpperCAmelCase__ = re.findall(r'^\s*import\s+\.(\S+)\s*$' , lowerCamelCase , flags=re.MULTILINE )
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall(r'^\s*from\s+\.(\S+)\s+import' , lowerCamelCase , flags=re.MULTILINE )
# Unique-ify
return list(set(lowerCamelCase ) )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = False
UpperCAmelCase__ = [module_file]
UpperCAmelCase__ = []
# Let's recurse through all relative imports
while not no_change:
UpperCAmelCase__ = []
for f in files_to_check:
new_imports.extend(get_relative_imports(lowerCamelCase ) )
UpperCAmelCase__ = Path(lowerCamelCase ).parent
UpperCAmelCase__ = [str(module_path / m ) for m in new_imports]
UpperCAmelCase__ = [f for f in new_import_files if f not in all_relative_imports]
UpperCAmelCase__ = [f'''{f}.py''' for f in new_import_files]
UpperCAmelCase__ = len(lowerCamelCase ) == 0
all_relative_imports.extend(lowerCamelCase )
return all_relative_imports
def a_ ( lowerCamelCase ):
with open(lowerCamelCase , 'r' , encoding='utf-8' ) as f:
UpperCAmelCase__ = f.read()
# Imports of the form `import xxx`
UpperCAmelCase__ = re.findall(r'^\s*import\s+(\S+)\s*$' , lowerCamelCase , flags=re.MULTILINE )
# Imports of the form `from xxx import yyy`
imports += re.findall(r'^\s*from\s+(\S+)\s+import' , lowerCamelCase , flags=re.MULTILINE )
# Only keep the top-level module
UpperCAmelCase__ = [imp.split('.' )[0] for imp in imports if not imp.startswith('.' )]
# Unique-ify and test we got them all
UpperCAmelCase__ = list(set(lowerCamelCase ) )
UpperCAmelCase__ = []
for imp in imports:
try:
importlib.import_module(lowerCamelCase )
except ImportError:
missing_packages.append(lowerCamelCase )
if len(lowerCamelCase ) > 0:
raise ImportError(
'This modeling file requires the following packages that were not found in your environment: '
f'''{", ".join(lowerCamelCase )}. Run `pip install {" ".join(lowerCamelCase )}`''' )
return get_relative_imports(lowerCamelCase )
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = module_path.replace(os.path.sep , '.' )
UpperCAmelCase__ = importlib.import_module(lowerCamelCase )
if class_name is None:
return find_pipeline_class(lowerCamelCase )
return getattr(lowerCamelCase , lowerCamelCase )
def a_ ( lowerCamelCase ):
from ..pipelines import DiffusionPipeline
UpperCAmelCase__ = dict(inspect.getmembers(lowerCamelCase , inspect.isclass ) )
UpperCAmelCase__ = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , lowerCamelCase )
and cls.__module__.split('.' )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
f'''Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'''
f''' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'''
f''' {loaded_module}.''' )
UpperCAmelCase__ = cls
return pipeline_class
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False , lowerCamelCase = False , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = False , ):
UpperCAmelCase__ = str(lowerCamelCase )
UpperCAmelCase__ = os.path.join(lowerCamelCase , lowerCamelCase )
if os.path.isfile(lowerCamelCase ):
UpperCAmelCase__ = module_file_or_url
UpperCAmelCase__ = 'local'
elif pretrained_model_name_or_path.count('/' ) == 0:
UpperCAmelCase__ = get_diffusers_versions()
# cut ".dev0"
UpperCAmelCase__ = 'v' + '.'.join(__version__.split('.' )[:3] )
# retrieve github version that matches
if revision is None:
UpperCAmelCase__ = latest_version if latest_version[1:] in available_versions else 'main'
logger.info(f'''Defaulting to latest_version: {revision}.''' )
elif revision in available_versions:
UpperCAmelCase__ = f'''v{revision}'''
elif revision == "main":
UpperCAmelCase__ = revision
else:
raise ValueError(
f'''`custom_revision`: {revision} does not exist. Please make sure to choose one of'''
f''' {", ".join(available_versions + ["main"] )}.''' )
# community pipeline on GitHub
UpperCAmelCase__ = COMMUNITY_PIPELINES_URL.format(revision=lowerCamelCase , pipeline=lowerCamelCase )
try:
UpperCAmelCase__ = cached_download(
lowerCamelCase , cache_dir=lowerCamelCase , force_download=lowerCamelCase , proxies=lowerCamelCase , resume_download=lowerCamelCase , local_files_only=lowerCamelCase , use_auth_token=lowerCamelCase , )
UpperCAmelCase__ = 'git'
UpperCAmelCase__ = pretrained_model_name_or_path + '.py'
except EnvironmentError:
logger.error(f'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''' )
raise
else:
try:
# Load from URL or cache if already cached
UpperCAmelCase__ = hf_hub_download(
lowerCamelCase , lowerCamelCase , cache_dir=lowerCamelCase , force_download=lowerCamelCase , proxies=lowerCamelCase , resume_download=lowerCamelCase , local_files_only=lowerCamelCase , use_auth_token=lowerCamelCase , )
UpperCAmelCase__ = os.path.join('local' , '--'.join(pretrained_model_name_or_path.split('/' ) ) )
except EnvironmentError:
logger.error(f'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''' )
raise
# Check we have all the requirements in our environment
UpperCAmelCase__ = check_imports(lowerCamelCase )
# Now we move the module inside our cached dynamic modules.
UpperCAmelCase__ = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(lowerCamelCase )
UpperCAmelCase__ = Path(lowerCamelCase ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(lowerCamelCase , submodule_path / module_file )
for module_needed in modules_needed:
UpperCAmelCase__ = f'''{module_needed}.py'''
shutil.copy(os.path.join(lowerCamelCase , lowerCamelCase ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = use_auth_token
elif use_auth_token is True:
UpperCAmelCase__ = HfFolder.get_token()
else:
UpperCAmelCase__ = None
UpperCAmelCase__ = model_info(lowerCamelCase , revision=lowerCamelCase , token=lowerCamelCase ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
UpperCAmelCase__ = submodule_path / commit_hash
UpperCAmelCase__ = full_submodule + os.path.sep + commit_hash
create_dynamic_module(lowerCamelCase )
if not (submodule_path / module_file).exists():
shutil.copy(lowerCamelCase , submodule_path / module_file )
# Make sure we also have every file with relative imports
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
lowerCamelCase , f'''{module_needed}.py''' , cache_dir=lowerCamelCase , force_download=lowerCamelCase , resume_download=lowerCamelCase , proxies=lowerCamelCase , use_auth_token=lowerCamelCase , revision=lowerCamelCase , local_files_only=lowerCamelCase , )
return os.path.join(lowerCamelCase , lowerCamelCase )
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = False , lowerCamelCase = False , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = False , **lowerCamelCase , ):
UpperCAmelCase__ = get_cached_module_file(
lowerCamelCase , lowerCamelCase , cache_dir=lowerCamelCase , force_download=lowerCamelCase , resume_download=lowerCamelCase , proxies=lowerCamelCase , use_auth_token=lowerCamelCase , revision=lowerCamelCase , local_files_only=lowerCamelCase , )
return get_class_in_module(lowerCamelCase , final_module.replace('.py' , '' ) )
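# Hedged usage sketch (imports the upstream helper by its public name, since the defs
# above are masked; needs network access). "clip_guided_stable_diffusion" is one of
# diffusers' published community pipelines, and "pipeline.py" mirrors what
# DiffusionPipeline.from_pretrained(custom_pipeline=...) passes as the module file.
from diffusers.utils.dynamic_modules_utils import get_class_from_dynamic_module

pipeline_cls = get_class_from_dynamic_module("clip_guided_stable_diffusion", "pipeline.py")
print(pipeline_cls.__name__)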
| 98 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Dict = logging.get_logger(__name__)
_A : Union[str, Any] = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
_UpperCAmelCase : Any = "vit_msn"
def __init__( self : Optional[Any] , A : Dict=7_6_8 , A : Union[str, Any]=1_2 , A : Optional[Any]=1_2 , A : List[Any]=3_0_7_2 , A : List[str]="gelu" , A : Optional[int]=0.0 , A : int=0.0 , A : int=0.02 , A : Tuple=1e-06 , A : int=2_2_4 , A : Union[str, Any]=1_6 , A : Dict=3 , A : Optional[Any]=True , **A : Optional[Any] , ) ->Dict:
super().__init__(**A )
lowerCamelCase__ : int = hidden_size
lowerCamelCase__ : Dict = num_hidden_layers
lowerCamelCase__ : str = num_attention_heads
lowerCamelCase__ : Tuple = intermediate_size
lowerCamelCase__ : str = hidden_act
lowerCamelCase__ : Optional[int] = hidden_dropout_prob
lowerCamelCase__ : Any = attention_probs_dropout_prob
lowerCamelCase__ : List[str] = initializer_range
lowerCamelCase__ : Optional[int] = layer_norm_eps
lowerCamelCase__ : Any = image_size
lowerCamelCase__ : Any = patch_size
lowerCamelCase__ : Union[str, Any] = num_channels
lowerCamelCase__ : Tuple = qkv_bias
| 142 | 0 |
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
__lowerCamelCase = get_tests_dir("fixtures")
__lowerCamelCase = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
__lowerCamelCase = get_tests_dir("fixtures/dummy-config.json")
class UpperCamelCase__( unittest.TestCase ):
def snake_case__ ( self ) -> Union[str, Any]:
A__ = 0
def snake_case__ ( self ) -> List[Any]:
A__ = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(__UpperCAmelCase ,__UpperCAmelCase )
def snake_case__ ( self ) -> List[Any]:
A__ = AutoFeatureExtractor.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase ,__UpperCAmelCase )
def snake_case__ ( self ) -> Tuple:
with tempfile.TemporaryDirectory() as tmpdirname:
A__ = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
A__ = AutoFeatureExtractor.from_pretrained(__UpperCAmelCase ).to_dict()
config_dict.pop('feature_extractor_type' )
A__ = WavaVecaFeatureExtractor(**__UpperCAmelCase )
# save in new folder
model_config.save_pretrained(__UpperCAmelCase )
config.save_pretrained(__UpperCAmelCase )
A__ = AutoFeatureExtractor.from_pretrained(__UpperCAmelCase )
# make sure private variable is not incorrectly saved
A__ = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(__UpperCAmelCase ,__UpperCAmelCase )
def snake_case__ ( self ) -> Dict:
A__ = AutoFeatureExtractor.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase ,__UpperCAmelCase )
def snake_case__ ( self ) -> List[Any]:
with self.assertRaisesRegex(
__UpperCAmelCase ,'bert-base is not a local folder and is not a valid model identifier' ):
A__ = AutoFeatureExtractor.from_pretrained('bert-base' )
def snake_case__ ( self ) -> Any:
with self.assertRaisesRegex(
__UpperCAmelCase ,R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
A__ = AutoFeatureExtractor.from_pretrained(__UpperCAmelCase ,revision='aaaaaa' )
def snake_case__ ( self ) -> Optional[Any]:
with self.assertRaisesRegex(
__UpperCAmelCase ,'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' ,):
A__ = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )
def snake_case__ ( self ) -> Union[str, Any]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__UpperCAmelCase ):
A__ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__UpperCAmelCase ):
A__ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' ,trust_remote_code=__UpperCAmelCase )
A__ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' ,trust_remote_code=__UpperCAmelCase )
self.assertEqual(feature_extractor.__class__.__name__ ,'NewFeatureExtractor' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__UpperCAmelCase )
A__ = AutoFeatureExtractor.from_pretrained(__UpperCAmelCase ,trust_remote_code=__UpperCAmelCase )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ ,'NewFeatureExtractor' )
def snake_case__ ( self ) -> str:
try:
AutoConfig.register('custom' ,__UpperCAmelCase )
AutoFeatureExtractor.register(__UpperCAmelCase ,__UpperCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__UpperCAmelCase ):
AutoFeatureExtractor.register(__UpperCAmelCase ,__UpperCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
A__ = CustomFeatureExtractor.from_pretrained(__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__UpperCAmelCase )
A__ = AutoFeatureExtractor.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase ,__UpperCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def snake_case__ ( self ) -> Dict:
class UpperCamelCase__( __A ):
lowerCAmelCase__ : List[Any] = True
try:
AutoConfig.register('custom' ,__UpperCAmelCase )
AutoFeatureExtractor.register(__UpperCAmelCase ,__UpperCAmelCase )
# If remote code is not set, the default is to use local
A__ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
self.assertEqual(feature_extractor.__class__.__name__ ,'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
A__ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' ,trust_remote_code=__UpperCAmelCase )
self.assertEqual(feature_extractor.__class__.__name__ ,'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
A__ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' ,trust_remote_code=__UpperCAmelCase )
self.assertEqual(feature_extractor.__class__.__name__ ,'NewFeatureExtractor' )
self.assertTrue(not hasattr(__UpperCAmelCase ,'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 362 |
"""simple docstring"""
def UpperCAmelCase ( UpperCamelCase__ = 4_000_000 ):
"""simple docstring"""
A__ = [0, 1]
A__ = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
A__ = 0
for j in range(len(UpperCamelCase__ ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(F'''{solution() = }''')
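# Equivalent sketch: even Fibonacci numbers are every third term and satisfy
# E(k) = 4*E(k-1) + E(k-2), so the odd terms never need to be generated.
def solution_even_only(n: int = 4_000_000) -> int:
    even_a, even_b, total = 2, 8, 0
    while even_a <= n:
        total += even_a
        even_a, even_b = even_b, 4 * even_b + even_a
    return total

assert solution_even_only() == 4_613_732  # the well-known Project Euler #2 answer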
| 154 | 0 |
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self , __A , __A , __A ):
"""simple docstring"""
if dst_width < 0 or dst_height < 0:
raise ValueError("Destination width/height should be > 0" )
lowerCamelCase : Optional[int] = img
lowerCamelCase : Any = img.shape[1]
lowerCamelCase : Optional[Any] = img.shape[0]
lowerCamelCase : List[Any] = dst_width
lowerCamelCase : int = dst_height
lowerCamelCase : List[str] = self.src_w / self.dst_w
lowerCamelCase : Dict = self.src_h / self.dst_h
lowerCamelCase : str = (
np.ones((self.dst_h, self.dst_w, 3) , np.uinta ) * 255
)
def _snake_case ( self ):
"""simple docstring"""
for i in range(self.dst_h ):
for j in range(self.dst_w ):
lowerCamelCase : Optional[int] = self.img[self.get_y(__A )][self.get_x(__A )]
def _snake_case ( self , __A ):
"""simple docstring"""
return int(self.ratio_x * x )
def _snake_case ( self , __A ):
"""simple docstring"""
return int(self.ratio_y * y )
if __name__ == "__main__":
_snake_case , _snake_case = 8_00, 6_00
_snake_case = imread('''image_data/lena.jpg''', 1)
_snake_case = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
f'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output
)
waitKey(0)
destroyAllWindows()
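# Vectorized sketch of the same mapping via NumPy fancy indexing: it produces the same
# output as the per-pixel loop above, since get_x/get_y reduce to int(ratio * index).
def nearest_neighbour_fast(img: np.ndarray, dst_w: int, dst_h: int) -> np.ndarray:
    src_h, src_w = img.shape[:2]
    xs = (np.arange(dst_w) * (src_w / dst_w)).astype(int)  # source column per output column
    ys = (np.arange(dst_h) * (src_h / dst_h)).astype(int)  # source row per output row
    return img[ys[:, None], xs[None, :]]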
| 283 |
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
def __init__( self , *__A , **__A ):
"""simple docstring"""
warnings.warn(
"The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use DPTImageProcessor instead." , __A , )
super().__init__(*__A , **__A )
| 283 | 1 |
'''simple docstring'''
def a ( __a ) -> set:
'''simple docstring'''
UpperCamelCase__ :Tuple = set()
# edges = list of graph's edges
UpperCamelCase__ :Optional[Any] = get_edges(__a )
# While there are still elements in edges list, take an arbitrary edge
# (from_node, to_node) and add his extremity to chosen_vertices and then
# remove all arcs adjacent to the from_node and to_node
while edges:
UpperCamelCase__ , UpperCamelCase__ = edges.pop()
chosen_vertices.add(__a )
chosen_vertices.add(__a )
for edge in edges.copy():
if from_node in edge or to_node in edge:
edges.discard(__a )
return chosen_vertices
def a ( __a ) -> set:
'''simple docstring'''
UpperCamelCase__ :Any = set()
for from_node, to_nodes in graph.items():
for to_node in to_nodes:
edges.add((from_node, to_node) )
return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
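# Self-contained sketch of the same matching heuristic (a classical 2-approximation:
# any optimal cover must contain at least one endpoint of every matched edge), kept
# separate from the masked functions above.
def matching_vertex_cover_sketch(graph: dict) -> set:
    edges = {(u, v) for u, nbrs in graph.items() for v in nbrs}
    cover: set = set()
    while edges:
        u, v = edges.pop()
        cover |= {u, v}
        edges = {e for e in edges if u not in e and v not in e}
    return cover

print(matching_vertex_cover_sketch({0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}))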
| 219 |
'''simple docstring'''
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :int = logging.get_logger()
# the current default level is logging.WARNING
UpperCamelCase__ :List[Any] = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = logging.get_verbosity()
UpperCamelCase__ :Union[str, Any] = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
UpperCamelCase__ :Optional[Any] = '''Testing 1, 2, 3'''
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(UpperCamelCase_ ) as cl:
logger.warning(UpperCamelCase_ )
self.assertEqual(cl.out , msg + '''\n''' )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(UpperCamelCase_ ) as cl:
logger.warning(UpperCamelCase_ )
self.assertEqual(cl.out , '''''' )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(UpperCamelCase_ ) as cl:
logger.warning(UpperCamelCase_ )
self.assertEqual(cl.out , msg + '''\n''' )
# restore to the original level
logging.set_verbosity(UpperCamelCase_ )
@mockenv(TRANSFORMERS_VERBOSITY='''error''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
UpperCamelCase__ :Tuple = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
UpperCamelCase__ :Any = os.getenv('''TRANSFORMERS_VERBOSITY''' , UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = logging.log_levels[env_level_str]
UpperCamelCase__ :int = logging.get_verbosity()
self.assertEqual(
UpperCamelCase_ , UpperCamelCase_ , F'''TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}''' , )
# restore to the original level
UpperCamelCase__ :Union[str, Any] = ''''''
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY='''super-error''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
transformers.utils.logging._reset_library_root_logger()
UpperCamelCase__ :Dict = logging.logging.getLogger()
with CaptureLogger(UpperCamelCase_ ) as cl:
# this action activates the env var
logging.get_logger('''transformers.models.bart.tokenization_bart''' )
self.assertIn('''Unknown option TRANSFORMERS_VERBOSITY=super-error''' , cl.out )
# no need to restore as nothing was changed
def lowerCAmelCase__ ( self ):
'''simple docstring'''
transformers.utils.logging._reset_library_root_logger()
UpperCamelCase__ :Optional[int] = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
UpperCamelCase__ :int = '''Testing 1, 2, 3'''
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''1''' ):
# nothing should be logged as env var disables this method
with CaptureLogger(UpperCamelCase_ ) as cl:
logger.warning_advice(UpperCamelCase_ )
self.assertEqual(cl.out , '''''' )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''''' ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(UpperCamelCase_ ) as cl:
logger.warning_advice(UpperCamelCase_ )
self.assertEqual(cl.out , msg + '''\n''' )
def a ( ) -> str:
'''simple docstring'''
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
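# Hedged usage sketch of the controls exercised above: a verbosity setting applies to
# every `transformers.*` logger at once, and TRANSFORMERS_VERBOSITY only takes effect
# when set before the library is imported.
logging.set_verbosity_info()
logging.get_logger("transformers").info("visible at INFO and below")
disable_progress_bar()  # mirrors the tqdm toggle checked above
enable_progress_bar()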
| 219 | 1 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class __lowerCAmelCase (lowercase_ ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = """SpeechT5FeatureExtractor"""
lowerCAmelCase__ : Union[str, Any] = """SpeechT5Tokenizer"""
def __init__(self : Any , UpperCamelCase : List[str] , UpperCamelCase : List[Any] ):
'''simple docstring'''
super().__init__(UpperCamelCase , UpperCamelCase )
def __call__(self : Optional[int] , *UpperCamelCase : int , **UpperCamelCase : Tuple ):
'''simple docstring'''
lowercase__ = kwargs.pop('''audio''' , UpperCamelCase )
lowercase__ = kwargs.pop('''text''' , UpperCamelCase )
lowercase__ = kwargs.pop('''text_target''' , UpperCamelCase )
lowercase__ = kwargs.pop('''audio_target''' , UpperCamelCase )
lowercase__ = kwargs.pop('''sampling_rate''' , UpperCamelCase )
if audio is not None and text is not None:
raise ValueError(
'''Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?''' )
if audio_target is not None and text_target is not None:
raise ValueError(
'''Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?''' )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
'''You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.''' )
if audio is not None:
lowercase__ = self.feature_extractor(UpperCamelCase , *UpperCamelCase , sampling_rate=UpperCamelCase , **UpperCamelCase )
elif text is not None:
lowercase__ = self.tokenizer(UpperCamelCase , **UpperCamelCase )
else:
lowercase__ = None
if audio_target is not None:
lowercase__ = self.feature_extractor(audio_target=UpperCamelCase , *UpperCamelCase , sampling_rate=UpperCamelCase , **UpperCamelCase )
lowercase__ = targets['''input_values''']
elif text_target is not None:
lowercase__ = self.tokenizer(UpperCamelCase , **UpperCamelCase )
lowercase__ = targets['''input_ids''']
else:
lowercase__ = None
if inputs is None:
return targets
if targets is not None:
lowercase__ = labels
lowercase__ = targets.get('''attention_mask''' )
if decoder_attention_mask is not None:
lowercase__ = decoder_attention_mask
return inputs
def UpperCamelCase__ (self : Tuple , *UpperCamelCase : Optional[Any] , **UpperCamelCase : str ):
'''simple docstring'''
lowercase__ = kwargs.pop('''input_values''' , UpperCamelCase )
lowercase__ = kwargs.pop('''input_ids''' , UpperCamelCase )
lowercase__ = kwargs.pop('''labels''' , UpperCamelCase )
if input_values is not None and input_ids is not None:
raise ValueError('''Cannot process both `input_values` and `input_ids` inputs.''' )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
'''You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.''' )
if input_values is not None:
lowercase__ = self.feature_extractor.pad(UpperCamelCase , *UpperCamelCase , **UpperCamelCase )
elif input_ids is not None:
lowercase__ = self.tokenizer.pad(UpperCamelCase , **UpperCamelCase )
else:
lowercase__ = None
if labels is not None:
if "input_ids" in labels or (isinstance(UpperCamelCase , UpperCamelCase ) and "input_ids" in labels[0]):
lowercase__ = self.tokenizer.pad(UpperCamelCase , **UpperCamelCase )
lowercase__ = targets['''input_ids''']
else:
lowercase__ = self.feature_extractor.feature_size
lowercase__ = self.feature_extractor.num_mel_bins
lowercase__ = self.feature_extractor.pad(UpperCamelCase , *UpperCamelCase , **UpperCamelCase )
lowercase__ = feature_size_hack
lowercase__ = targets['''input_values''']
else:
lowercase__ = None
if inputs is None:
return targets
if targets is not None:
lowercase__ = labels
lowercase__ = targets.get('''attention_mask''' )
if decoder_attention_mask is not None:
lowercase__ = decoder_attention_mask
return inputs
def UpperCamelCase__ (self : str , *UpperCamelCase : int , **UpperCamelCase : Any ):
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase )
def UpperCamelCase__ (self : Union[str, Any] , *UpperCamelCase : Optional[Any] , **UpperCamelCase : Optional[int] ):
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase )
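# Hedged usage sketch for the processor above, via its public upstream name
# ("microsoft/speecht5_tts" is the canonical TTS checkpoint; the audio target is
# synthetic): text routes to the tokenizer, the audio target to the feature extractor,
# and the target branch is exposed as `labels`, as in the __call__ logic above.
import numpy as np
from transformers import SpeechT5Processor

tts_processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
batch = tts_processor(
    text="hello world", audio_target=np.zeros(16_000), sampling_rate=16_000, return_tensors="pt"
)
print(batch["input_ids"].shape, batch["labels"].shape)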
| 2 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Tuple = logging.get_logger(__name__)
lowerCamelCase : Dict = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class __lowerCAmelCase (lowercase_ ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = """cvt"""
def __init__(self : int , UpperCamelCase : List[Any]=3 , UpperCamelCase : int=[7, 3, 3] , UpperCamelCase : str=[4, 2, 2] , UpperCamelCase : Dict=[2, 1, 1] , UpperCamelCase : Dict=[64, 192, 384] , UpperCamelCase : Dict=[1, 3, 6] , UpperCamelCase : Dict=[1, 2, 10] , UpperCamelCase : Any=[4.0, 4.0, 4.0] , UpperCamelCase : Optional[int]=[0.0, 0.0, 0.0] , UpperCamelCase : Optional[int]=[0.0, 0.0, 0.0] , UpperCamelCase : int=[0.0, 0.0, 0.1] , UpperCamelCase : Any=[True, True, True] , UpperCamelCase : int=[False, False, True] , UpperCamelCase : Union[str, Any]=["dw_bn", "dw_bn", "dw_bn"] , UpperCamelCase : Optional[int]=[3, 3, 3] , UpperCamelCase : Tuple=[1, 1, 1] , UpperCamelCase : Any=[2, 2, 2] , UpperCamelCase : Dict=[1, 1, 1] , UpperCamelCase : List[str]=[1, 1, 1] , UpperCamelCase : str=0.02 , UpperCamelCase : int=1E-12 , **UpperCamelCase : Union[str, Any] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase )
lowercase__ = num_channels
lowercase__ = patch_sizes
lowercase__ = patch_stride
lowercase__ = patch_padding
lowercase__ = embed_dim
lowercase__ = num_heads
lowercase__ = depth
lowercase__ = mlp_ratio
lowercase__ = attention_drop_rate
lowercase__ = drop_rate
lowercase__ = drop_path_rate
lowercase__ = qkv_bias
lowercase__ = cls_token
lowercase__ = qkv_projection_method
lowercase__ = kernel_qkv
lowercase__ = padding_kv
lowercase__ = stride_kv
lowercase__ = padding_q
lowercase__ = stride_q
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
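# Hedged sketch (uses the upstream class by its public name, since the masked
# assignments above drop `self.`): the defaults reproduce microsoft/cvt-13's
# three-stage layout.
from transformers import CvtConfig

cvt_cfg = CvtConfig()
print(cvt_cfg.embed_dim, cvt_cfg.depth)  # [64, 192, 384] [1, 2, 10]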
| 2 | 1 |
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def __magic_name__ ( self : Dict ):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __magic_name__ ( self : Optional[Any] ):
"""simple docstring"""
_A: Tuple = ort.SessionOptions()
_A: Tuple = False
return options
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
_A: Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
_A: Optional[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
_A: str = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy''' )
# using the PNDM scheduler by default
_A: List[Any] = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_A: List[Any] = '''A red cat sitting on a park bench'''
_A: Any = np.random.RandomState(0 )
_A: Tuple = pipe(
prompt=lowerCAmelCase_ , image=lowerCAmelCase_ , mask_image=lowerCAmelCase_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=1_5 , generator=lowerCAmelCase_ , output_type='''np''' , )
_A: Optional[int] = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 1e-2
| 301 |
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__UpperCamelCase : Any = (DDPMParallelScheduler,)
def __magic_name__ ( self : Optional[int] , **lowerCAmelCase_ : Any ):
"""simple docstring"""
_A: Optional[int] = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**lowerCAmelCase_ )
return config
def __magic_name__ ( self : int ):
"""simple docstring"""
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase_ )
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=lowerCAmelCase_ , beta_end=lowerCAmelCase_ )
def __magic_name__ ( self : Optional[Any] ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCAmelCase_ )
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=lowerCAmelCase_ )
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCAmelCase_ )
def __magic_name__ ( self : List[str] ):
"""simple docstring"""
self.check_over_configs(thresholding=lowerCAmelCase_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=lowerCAmelCase_ , prediction_type=lowerCAmelCase_ , sample_max_value=lowerCAmelCase_ , )
def __magic_name__ ( self : Dict ):
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase_ )
def __magic_name__ ( self : int ):
"""simple docstring"""
for t in [0, 5_0_0, 9_9_9]:
self.check_over_forward(time_step=lowerCAmelCase_ )
def __magic_name__ ( self : Any ):
"""simple docstring"""
_A: List[Any] = self.scheduler_classes[0]
_A: Union[str, Any] = self.get_scheduler_config()
_A: Optional[Any] = scheduler_class(**lowerCAmelCase_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.00979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1e-5
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
_A: Any = self.scheduler_classes[0]
_A: List[str] = self.get_scheduler_config()
_A: Union[str, Any] = scheduler_class(**lowerCAmelCase_ )
_A: List[Any] = len(lowerCAmelCase_ )
_A: Union[str, Any] = self.dummy_model()
_A: Dict = self.dummy_sample_deter
_A: Dict = self.dummy_sample_deter + 0.1
_A: str = self.dummy_sample_deter - 0.1
_A: str = samplea.shape[0]
_A: Optional[Any] = torch.stack([samplea, samplea, samplea] , dim=0 )
_A: List[str] = torch.arange(lowerCAmelCase_ )[0:3, None].repeat(1 , lowerCAmelCase_ )
_A: List[Any] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
_A: Optional[int] = scheduler.batch_step_no_noise(lowerCAmelCase_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
_A: Dict = torch.sum(torch.abs(lowerCAmelCase_ ) )
_A: List[str] = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 1153.1833 ) < 1e-2
assert abs(result_mean.item() - 0.5005 ) < 1e-3
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
_A: Optional[Any] = self.scheduler_classes[0]
_A: List[Any] = self.get_scheduler_config()
_A: Any = scheduler_class(**lowerCAmelCase_ )
_A: Union[str, Any] = len(lowerCAmelCase_ )
_A: Any = self.dummy_model()
_A: Optional[int] = self.dummy_sample_deter
_A: List[str] = torch.manual_seed(0 )
for t in reversed(range(lowerCAmelCase_ ) ):
# 1. predict noise residual
_A: Optional[int] = model(lowerCAmelCase_ , lowerCAmelCase_ )
# 2. predict previous mean of sample x_t-1
_A: Optional[int] = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample
_A: List[Any] = pred_prev_sample
_A: Optional[int] = torch.sum(torch.abs(lowerCAmelCase_ ) )
_A: Any = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
_A: Optional[int] = self.scheduler_classes[0]
_A: Union[str, Any] = self.get_scheduler_config(prediction_type='''v_prediction''' )
_A: List[str] = scheduler_class(**lowerCAmelCase_ )
_A: Union[str, Any] = len(lowerCAmelCase_ )
_A: Any = self.dummy_model()
_A: Any = self.dummy_sample_deter
_A: str = torch.manual_seed(0 )
for t in reversed(range(lowerCAmelCase_ ) ):
# 1. predict noise residual
_A: Any = model(lowerCAmelCase_ , lowerCAmelCase_ )
# 2. predict previous mean of sample x_t-1
_A: int = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample
_A: Tuple = pred_prev_sample
_A: List[Any] = torch.sum(torch.abs(lowerCAmelCase_ ) )
_A: str = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
_A: Optional[int] = self.scheduler_classes[0]
_A: Optional[Any] = self.get_scheduler_config()
_A: Dict = scheduler_class(**lowerCAmelCase_ )
_A: Any = [1_0_0, 8_7, 5_0, 1, 0]
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
_A: Tuple = scheduler.timesteps
for i, timestep in enumerate(lowerCAmelCase_ ):
if i == len(lowerCAmelCase_ ) - 1:
_A: Dict = -1
else:
_A: int = timesteps[i + 1]
_A: List[str] = scheduler.previous_timestep(lowerCAmelCase_ )
_A: str = prev_t.item()
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def __magic_name__ ( self : Any ):
"""simple docstring"""
_A: Tuple = self.scheduler_classes[0]
_A: int = self.get_scheduler_config()
_A: Any = scheduler_class(**lowerCAmelCase_ )
_A: Union[str, Any] = [1_0_0, 8_7, 5_0, 5_1, 0]
with self.assertRaises(lowerCAmelCase_ , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
def __magic_name__ ( self : int ):
"""simple docstring"""
_A: List[str] = self.scheduler_classes[0]
_A: Optional[Any] = self.get_scheduler_config()
_A: Union[str, Any] = scheduler_class(**lowerCAmelCase_ )
_A: Union[str, Any] = [1_0_0, 8_7, 5_0, 1, 0]
_A: Dict = len(lowerCAmelCase_ )
with self.assertRaises(lowerCAmelCase_ , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=lowerCAmelCase_ , timesteps=lowerCAmelCase_ )
def __magic_name__ ( self : Any ):
"""simple docstring"""
_A: List[Any] = self.scheduler_classes[0]
_A: int = self.get_scheduler_config()
_A: str = scheduler_class(**lowerCAmelCase_ )
_A: Any = [scheduler.config.num_train_timesteps]
with self.assertRaises(
lowerCAmelCase_ , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
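# Numeric sketch of what the variance assertions above check: under a linear beta
# schedule, the "fixed_small" variance is beta_t * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t),
# which evaluates to ~0.00979 at t=487 and ~0.02 at t=999 for this config.
betas = torch.linspace(0.0001, 0.02, 1_000)
alpha_bars = torch.cumprod(1.0 - betas, dim=0)

def fixed_small_variance(t: int) -> torch.Tensor:
    prev = alpha_bars[t - 1] if t > 0 else torch.tensor(1.0)
    return betas[t] * (1 - prev) / (1 - alpha_bars[t])

print(fixed_small_variance(487), fixed_small_variance(999))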
| 301 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase__: Optional[int] = logging.get_logger(__name__)
UpperCamelCase__: int = {
"google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class SCREAMING_SNAKE_CASE( A__ ):
"""simple docstring"""
lowerCamelCase__ = """vit"""
def __init__( self : Dict , __snake_case : int=768 , __snake_case : Optional[int]=12 , __snake_case : Any=12 , __snake_case : Optional[Any]=3072 , __snake_case : Any="gelu" , __snake_case : str=0.0 , __snake_case : str=0.0 , __snake_case : Dict=0.02 , __snake_case : Optional[int]=1E-12 , __snake_case : List[str]=224 , __snake_case : Tuple=16 , __snake_case : Dict=3 , __snake_case : List[str]=True , __snake_case : Optional[int]=16 , **__snake_case : Dict , ) -> Optional[Any]:
super().__init__(**__snake_case )
UpperCAmelCase : str = hidden_size
UpperCAmelCase : int = num_hidden_layers
UpperCAmelCase : List[Any] = num_attention_heads
UpperCAmelCase : str = intermediate_size
UpperCAmelCase : Union[str, Any] = hidden_act
UpperCAmelCase : List[str] = hidden_dropout_prob
UpperCAmelCase : Any = attention_probs_dropout_prob
UpperCAmelCase : Optional[Any] = initializer_range
UpperCAmelCase : Optional[int] = layer_norm_eps
UpperCAmelCase : Union[str, Any] = image_size
UpperCAmelCase : Any = patch_size
UpperCAmelCase : Union[str, Any] = num_channels
UpperCAmelCase : Any = qkv_bias
UpperCAmelCase : Union[str, Any] = encoder_stride
class SCREAMING_SNAKE_CASE( A__ ):
"""simple docstring"""
lowerCamelCase__ = version.parse("""1.11""" )
@property
def A ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def A ( self : Union[str, Any] ) -> float:
return 1E-4
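# Derivation sketch (uses the upstream ViTConfig by its public name, since the masked
# assignments above drop `self.`): the encoder sequence length is
# (image_size // patch_size) ** 2 patches plus one [CLS] token.
from transformers import ViTConfig

vit_cfg = ViTConfig()
print((vit_cfg.image_size // vit_cfg.patch_size) ** 2 + 1)  # 197 for 224/16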
| 23 |
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class SCREAMING_SNAKE_CASE( A__ ):
"""simple docstring"""
lowerCamelCase__ = """MCTCTFeatureExtractor"""
lowerCamelCase__ = """AutoTokenizer"""
def __init__( self : Dict , __snake_case : Optional[int] , __snake_case : List[str] ) -> str:
super().__init__(__snake_case , __snake_case )
UpperCAmelCase : List[Any] = self.feature_extractor
UpperCAmelCase : Union[str, Any] = False
def __call__( self : Any , *__snake_case : List[str] , **__snake_case : Any ) -> List[Any]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__snake_case , **__snake_case )
if "raw_speech" in kwargs:
warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
UpperCAmelCase : int = kwargs.pop('''raw_speech''' )
else:
UpperCAmelCase : Union[str, Any] = kwargs.pop('''audio''' , __snake_case )
UpperCAmelCase : Optional[Any] = kwargs.pop('''sampling_rate''' , __snake_case )
UpperCAmelCase : Dict = kwargs.pop('''text''' , __snake_case )
if len(__snake_case ) > 0:
UpperCAmelCase : Any = args[0]
UpperCAmelCase : Optional[int] = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
UpperCAmelCase : List[str] = self.feature_extractor(__snake_case , *__snake_case , sampling_rate=__snake_case , **__snake_case )
if text is not None:
UpperCAmelCase : int = self.tokenizer(__snake_case , **__snake_case )
if text is None:
return inputs
elif audio is None:
return encodings
else:
UpperCAmelCase : str = encodings['''input_ids''']
return inputs
def A ( self : List[Any] , *__snake_case : List[Any] , **__snake_case : List[Any] ) -> str:
return self.tokenizer.batch_decode(*__snake_case , **__snake_case )
def A ( self : List[Any] , *__snake_case : int , **__snake_case : Optional[int] ) -> Any:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*__snake_case , **__snake_case )
UpperCAmelCase : List[Any] = kwargs.pop('''input_features''' , __snake_case )
UpperCAmelCase : Optional[Any] = kwargs.pop('''labels''' , __snake_case )
if len(__snake_case ) > 0:
UpperCAmelCase : List[str] = args[0]
UpperCAmelCase : List[Any] = args[1:]
if input_features is not None:
UpperCAmelCase : Tuple = self.feature_extractor.pad(__snake_case , *__snake_case , **__snake_case )
if labels is not None:
UpperCAmelCase : Optional[int] = self.tokenizer.pad(__snake_case , **__snake_case )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
UpperCAmelCase : List[str] = labels['''input_ids''']
return input_features
def A ( self : Union[str, Any] , *__snake_case : Optional[Any] , **__snake_case : Optional[int] ) -> Optional[Any]:
return self.tokenizer.decode(*__snake_case , **__snake_case )
@contextmanager
def A ( self : Any ) -> Optional[int]:
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your audio inputs, or in a separate call.''' )
UpperCAmelCase : Dict = True
UpperCAmelCase : List[Any] = self.tokenizer
yield
UpperCAmelCase : Tuple = self.feature_extractor
UpperCAmelCase : List[Any] = False
| 23 | 1 |
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class _SCREAMING_SNAKE_CASE ( __a ):
def snake_case__ ( self : Union[str, Any] ):
__magic_name__ = SMALL_MODEL_IDENTIFIER
__magic_name__ = '''pt'''
__magic_name__ = '''tf'''
def snake_case__ ( self : Optional[Any] , a__ : Optional[int] ):
__magic_name__ = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(a__ )
def snake_case__ ( self : int , a__ : str ):
__magic_name__ = TFAutoModel.from_pretrained(self.test_model , from_pt=a__ )
model_tf.save_pretrained(a__ )
def snake_case__ ( self : Optional[int] ):
__magic_name__ = '''mock_framework'''
# Framework provided - return whatever the user provides
__magic_name__ = FeaturesManager.determine_framework(self.test_model , a__ )
self.assertEqual(a__ , a__ )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(a__ )
__magic_name__ = FeaturesManager.determine_framework(a__ , a__ )
self.assertEqual(a__ , a__ )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(a__ )
__magic_name__ = FeaturesManager.determine_framework(a__ , a__ )
self.assertEqual(a__ , a__ )
    def test_checkpoint_provided( self ):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            framework = FeaturesManager.determine_framework(local_pt_ckpt )
        self.assertEqual(framework , self.framework_pt )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            framework = FeaturesManager.determine_framework(local_tf_ckpt )
        self.assertEqual(framework , self.framework_tf )
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError ):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt )
    def test_from_environment( self ):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False )
        with patch('''transformers.onnx.features.is_tf_available''' , mock_tf_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
        self.assertEqual(framework , self.framework_pt )
        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False )
        with patch('''transformers.onnx.features.is_torch_available''' , mock_torch_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
        self.assertEqual(framework , self.framework_tf )
        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True )
        mock_torch_available = MagicMock(return_value=True )
        with patch('''transformers.onnx.features.is_tf_available''' , mock_tf_available ), patch(
            '''transformers.onnx.features.is_torch_available''' , mock_torch_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
        self.assertEqual(framework , self.framework_pt )
        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False )
        mock_torch_available = MagicMock(return_value=False )
        with patch('''transformers.onnx.features.is_tf_available''' , mock_tf_available ), patch(
            '''transformers.onnx.features.is_torch_available''' , mock_torch_available ):
            with self.assertRaises(EnvironmentError ):
                framework = FeaturesManager.determine_framework(self.test_model )
| 352 |
'''simple docstring'''
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples( tok , src_examples , tgt_examples , max_tokens=1024 ) -> Union[str, Any]:
    '''simple docstring'''
    finished_src , finished_tgt = [], []
    sorted_examples = list(zip(src_examples , tgt_examples ) )
    new_src , new_tgt = sorted_examples[0]
    def is_too_big(strang ):
        return tok(strang , return_tensors='''pt''' ).input_ids.shape[1] > max_tokens
    for src, tgt in tqdm(sorted_examples[1:] ):
        cand_src = new_src + ''' ''' + src
        cand_tgt = new_tgt + ''' ''' + tgt
        if is_too_big(cand_src ) or is_too_big(cand_tgt ):  # cant fit, finalize example
            finished_src.append(new_src )
            finished_tgt.append(new_tgt )
            new_src , new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src , new_tgt = cand_src, cand_tgt
    # cleanup: flush the last accumulated pair
    if new_src:
        assert new_tgt
        finished_src.append(new_src )
        finished_tgt.append(new_tgt )
    return finished_src, finished_tgt
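# Minimal usage sketch for pack_examples (the tokenizer checkpoint is an assumption):
# consecutive (src, tgt) pairs are greedily concatenated until either side would
# exceed max_tokens, which shrinks the number of short, padding-heavy examples.
#
#   tok = AutoTokenizer.from_pretrained("facebook/bart-large-cnn")
#   src, tgt = pack_examples(tok, ["a b", "c d"], ["x", "y"], max_tokens=1024)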
def pack_data_dir( tok , data_dir , max_tokens , save_path ) -> Any:
    '''simple docstring'''
    save_path = Path(save_path )
    save_path.mkdir(exist_ok=True )
    for split in ["train"]:
        src_path , tgt_path = data_dir / F'''{split}.source''', data_dir / F'''{split}.target'''
        src_docs = [x.rstrip() for x in Path(src_path ).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path ).open().readlines()]
        packed_src , packed_tgt = pack_examples(tok , src_docs , tgt_docs , max_tokens )
        print(F'''packed {split} split from {len(src_docs )} examples -> {len(packed_src )}.''' )
        Path(save_path / F'''{split}.source''' ).open('''w''' ).write('''\n'''.join(packed_src ) )
        Path(save_path / F'''{split}.target''' ).open('''w''' ).write('''\n'''.join(packed_tgt ) )
    for split in ["val", "test"]:
        src_path , tgt_path = data_dir / F'''{split}.source''', data_dir / F'''{split}.target'''
        shutil.copyfile(src_path , save_path / F'''{split}.source''' )
        shutil.copyfile(tgt_path , save_path / F'''{split}.target''' )
def packer_cli( ) -> List[str]:
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument('''--tok_name''' , type=str , help='''like facebook/bart-large-cnn,t5-base, etc.''' )
    parser.add_argument('''--max_seq_len''' , type=int , default=128 )
    parser.add_argument('''--data_dir''' , type=str )
    parser.add_argument('''--save_path''' , type=str )
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name )
    return pack_data_dir(tokenizer , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli()
| 98 | 0 |
from math import sqrt
def is_prime( number ):
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must be an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2 , int(round(sqrt(number ) ) ) + 1 ):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status , bool ), "'status' must be a bool"
    return status
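# Example (verified by hand): trial division only runs up to sqrt(number).
#   is_prime(97)  -> True
#   is_prime(100) -> False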
def sieve_er( n ):
    assert isinstance(n , int ) and (n > 2), "'N' must be an int and > 2"
    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2 , n + 1 ) )
    ans = []  # this list will be returned.
    # actual sieve of Eratosthenes
    for i in range(len(begin_list ) ):
        for j in range(i + 1 , len(begin_list ) ):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans , list ), "'ans' must be a list"
    return ans
def get_prime_numbers( n ):
    assert isinstance(n , int ) and (n > 2), "'N' must be an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2 , n + 1 ):
        if is_prime(number ):
            ans.append(number )
    # precondition
    assert isinstance(ans , list ), "'ans' must be a list"
    return ans
def prime_factorization( number ):
    assert isinstance(number , int ) and number >= 0, "'number' must be an int and >= 0"
    ans = []  # this list will be returned by the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number )
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number ):
        while quotient != 1:
            if is_prime(factor ) and (quotient % factor == 0):
                ans.append(factor )
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number )
    # precondition
    assert isinstance(ans , list ), "'ans' must be a list"
    return ans
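# Example (verified by hand): repeated trial division collects each prime factor
# with multiplicity, e.g. prime_factorization(40) -> [2, 2, 2, 5].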
def greatest_prime_factor( number ):
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must be an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number )
    ans = max(prime_factors )
    # precondition
    assert isinstance(ans , int ), "'ans' must be an int"
    return ans
def smallest_prime_factor( number ):
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must be an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number )
    ans = min(prime_factors )
    # precondition
    assert isinstance(ans , int ), "'ans' must be an int"
    return ans
def is_even( number ):
    assert isinstance(number , int ), "'number' must be an int"
    assert isinstance(number % 2 == 0 , bool ), "compare must be a bool"
    return number % 2 == 0
def is_odd( number ):
    assert isinstance(number , int ), "'number' must be an int"
    assert isinstance(number % 2 != 0 , bool ), "compare must be a bool"
    return number % 2 != 0
def goldbach( number ):
    assert (
        isinstance(number , int ) and (number > 2) and is_even(number )
    ), "'number' must be an int, even and > 2"
    ans = []  # this list will be returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number )
    len_pn = len(prime_numbers )
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i] )
                ans.append(prime_numbers[j] )
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans , list )
        and (len(ans ) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0] )
        and is_prime(ans[1] )
    ), "'ans' must contain two primes whose sum equals 'number'"
    return ans
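# Example (Goldbach partition, verified by hand): 28 = 5 + 23, both prime.
#   goldbach(28) -> [5, 23]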
def gcd( number1 , number2 ):
    assert (
        isinstance(number1 , int )
        and isinstance(number2 , int )
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must be positive integers."
    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest
    # precondition
    assert isinstance(number1 , int ) and (
        number1 >= 0
    ), "'number' must be an int and positive"
    return number1
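# Example (Euclidean algorithm, verified by hand):
#   gcd(24, 36) -> 12   (24 % 36 = 24, 36 % 24 = 12, 24 % 12 = 0)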
def kg_v( number1 , number2 ):
    assert (
        isinstance(number1 , int )
        and isinstance(number2 , int )
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must be positive integers."
    ans = 1  # actual answer that will be returned.
    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1 )
        prime_fac_2 = prime_factorization(number2 )
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1 , number2 )
    count1 = 0
    count2 = 0
    done = []  # captured numbers in both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n )
                count2 = prime_fac_2.count(n )
                for _ in range(max(count1 , count2 ) ):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n )
                for _ in range(count1 ):
                    ans *= n
            done.append(n )
    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n )
            for _ in range(count2 ):
                ans *= n
            done.append(n )
    # precondition
    assert isinstance(ans , int ) and (
        ans >= 0
    ), "'ans' must be an int and positive"
    return ans
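# Example (least common multiple via prime factorizations, verified by hand):
#   kg_v(4, 6) -> 12   (4 = 2*2, 6 = 2*3, lcm = 2*2*3)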
def get_prime( n ):
    assert isinstance(n , int ) and (n >= 0), "'number' must be a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans ):
            ans += 1
    # precondition
    assert isinstance(ans , int ) and is_prime(
        ans ), "'ans' must be a prime number and an int"
    return ans
def get_primes_between( p_number_1 , p_number_2 ):
    assert (
        is_prime(p_number_1 ) and is_prime(p_number_2 ) and (p_number_1 < p_number_2)
    ), "The arguments must be prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returned.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number ):
        number += 1
    while number < p_number_2:
        ans.append(number )
        number += 1
        # fetch the next prime number.
        while not is_prime(number ):
            number += 1
    # precondition
    assert (
        isinstance(ans , list )
        and ans[0] != p_number_1
        and ans[len(ans ) - 1] != p_number_2
    ), "'ans' must be a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def get_divisors( n ):
    assert isinstance(n , int ) and (n >= 1), "'n' must be an int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1 , n + 1 ):
        if n % divisor == 0:
            ans.append(divisor )
    # precondition
    assert ans[0] == 1 and ans[len(ans ) - 1] == n, "Error in function get_divisors(...)"
    return ans
def is_perfect_number( number ):
    assert isinstance(number , int ) and (
        number > 1
    ), "'number' must be an int and > 1"
    divisors = get_divisors(number )
    # precondition
    assert (
        isinstance(divisors , list )
        and (divisors[0] == 1)
        and (divisors[len(divisors ) - 1] == number)
    ), "Error in help-function get_divisors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1] ) == number
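# Example (verified by hand): 6 is perfect since 1 + 2 + 3 == 6; 8 is not (1 + 2 + 4 == 7).
#   is_perfect_number(6) -> True
#   is_perfect_number(8) -> False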
def simplify_fraction( numerator , denominator ):
    assert (
        isinstance(numerator , int )
        and isinstance(denominator , int )
        and (denominator != 0)
    ), "The arguments must be ints and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator ) , abs(denominator ) )
    # precondition
    assert (
        isinstance(gcd_of_fraction , int )
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def factorial( n ):
    assert isinstance(n , int ) and (n >= 0), "'n' must be an int and >= 0"
    ans = 1  # this will be returned.
    for factor in range(1 , n + 1 ):
        ans *= factor
    return ans
def fib( n ):
    assert isinstance(n , int ) and (n >= 0), "'n' must be an int and >= 0"
    tmp = 0
    fib_1 = 1
    ans = 1  # this will be returned
    for _ in range(n - 1 ):
        tmp = ans
        ans += fib_1
        fib_1 = tmp
    return ans
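# Example (verified by hand): this indexing yields fib(0) == fib(1) == 1,
# fib(2) == 2, fib(5) == 8.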
| 9 |
from __future__ import annotations
__A : str = 1.60_21E-19 # units = C
def electric_conductivity( conductivity : float , electron_conc : float , mobility : float , ) ->tuple[str, float]:
"""simple docstring"""
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif conductivity < 0:
raise ValueError("""Conductivity cannot be negative""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative""" )
elif mobility < 0:
        raise ValueError("""Mobility cannot be negative""" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
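# Example (sigma = q * n * mu, verified by hand): with electron_conc=1600 and
# mobility=0.01, the missing conductivity is 1600 * 0.01 * 1.6021e-19.
#   electric_conductivity(conductivity=0, electron_conc=1600, mobility=0.01)
#   -> ('conductivity', 2.56336e-18)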
| 154 | 0 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher( function , args=() , num_processes=None , mixed_precision="no" , use_port="29500" ) -> List[str]:
    """simple docstring"""
    in_colab = False
    in_kaggle = False
    if any(key.startswith("""KAGGLE""" ) for key in os.environ.keys() ):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = """google.colab""" in str(sys.modules["""IPython"""].get_ipython() )
    try:
        mixed_precision = PrecisionType(mixed_precision.lower() )
    except ValueError:
        raise ValueError(
            f'''Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.''' )
    if (in_colab or in_kaggle) and (os.environ.get("""TPU_NAME""" , None ) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp
        if len(AcceleratorState._shared_state ) > 0:
            raise ValueError(
                """To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside """
                """your training function. Restart your notebook and make sure no cells initializes an """
                """`Accelerator`.""" )
        if num_processes is None:
            num_processes = 8
        launcher = PrepareForLaunch(function , distributed_type="""TPU""" )
        print(f'''Launching a training on {num_processes} TPU cores.''' )
        xmp.spawn(launcher , args=args , nprocs=num_processes , start_method="""fork""" )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print("""Launching training on one GPU.""" )
else:
print("""Launching training on one CPU.""" )
        function(*args )
else:
if num_processes is None:
raise ValueError(
"""You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.""" )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"""To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized """
"""inside your training function. Restart your notebook and make sure no cells initializes an """
"""`Accelerator`.""" )
if torch.cuda.is_initialized():
raise ValueError(
"""To launch a multi-GPU training from your notebook, you need to avoid running any instruction """
"""using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA """
"""function.""" )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes , master_addr="""127.0.01""" , master_port=use_port , mixed_precision=mixed_precision ):
            launcher = PrepareForLaunch(function , distributed_type="""MULTI_GPU""" )
            print(f'''Launching training on {num_processes} GPUs.''' )
            try:
                start_processes(launcher , args=args , nprocs=num_processes , start_method="""fork""" )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
"""CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. """
"""This likely stems from an outside import causing issues once the `notebook_launcher()` is called. """
"""Please review your imports and test them when running the `notebook_launcher()` to identify """
"""which one is problematic.""" ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
A : Optional[Any] = """1"""
print("""Launching training on MPS.""" )
elif torch.cuda.is_available():
print("""Launching training on one GPU.""" )
else:
print("""Launching training on CPU.""" )
function(*_lowerCAmelCase )
def debug_launcher( function , args=() , num_processes=2 ) -> Any:
    """simple docstring"""
    from torch.multiprocessing import start_processes
    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes , master_addr="""127.0.01""" , master_port="""29500""" , accelerate_mixed_precision="""no""" , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu="""yes""" , ):
            launcher = PrepareForLaunch(function , debug=True )
            start_processes(launcher , args=args , nprocs=num_processes , start_method="""fork""" )
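# Hypothetical usage sketch (the training function below is an assumption): in a
# notebook, define the complete training loop first, then hand it to the launcher.
#
#   def training_loop(lr=1e-4):
#       ...  # build Accelerator, model and dataloaders *inside* this function
#
#   notebook_launcher(training_loop, args=(1e-4,), num_processes=2)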
| 350 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module ):
    '''simple docstring'''
    def __init__(self ):
        super().__init__()
        self.linear1 = nn.Linear(3, 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linear2 = nn.Linear(4, 5 )
    def forward(self, x ):
        return self.linear2(self.batchnorm(self.linear1(x ) ) )
class PreForwardHook(ModelHook ):
    '''simple docstring'''
    def pre_forward(self, module, *args, **kwargs ):
        return (args[0] + 1,) + args[1:], kwargs
class PostForwardHook(ModelHook ):
    '''simple docstring'''
    def post_forward(self, module, output ):
        return output + 1
class HooksModelTester(unittest.TestCase ):
    '''simple docstring'''
    def test_add_and_remove_hooks(self ):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook )
        self.assertEqual(test_model._hf_hook, test_hook )
        self.assertTrue(hasattr(test_model, """_old_forward""" ) )
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, """forward""" )
        self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ), ["""x"""] )
        remove_hook_from_module(test_model )
        self.assertFalse(hasattr(test_model, """_hf_hook""" ) )
        self.assertFalse(hasattr(test_model, """_old_forward""" ) )
    def test_append_and_remove_hooks(self ):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook )
        add_hook_to_module(test_model, test_hook, append=True )
        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook ), True )
        self.assertEqual(len(test_model._hf_hook.hooks ), 2 )
        self.assertTrue(hasattr(test_model, """_old_forward""" ) )
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, """forward""" )
        self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ), ["""x"""] )
        remove_hook_from_module(test_model )
        self.assertFalse(hasattr(test_model, """_hf_hook""" ) )
        self.assertFalse(hasattr(test_model, """_old_forward""" ) )
    def test_pre_forward_hook_is_executed(self ):
        test_model = ModelForTest()
        x = torch.randn(2, 3 )
        expected = test_model(x + 1 )
        expected2 = test_model(x + 2 )
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook )
        output1 = test_model(x )
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5 ) )
        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook )
        output1 = test_model(x )
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5 ) )
        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook() )
        add_hook_to_module(test_model, test_hook )
        output2 = test_model(x )
        assert torch.allclose(output2, expected2, atol=1e-5 )
    def test_post_forward_hook_is_executed(self ):
        test_model = ModelForTest()
        x = torch.randn(2, 3 )
        output = test_model(x )
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook )
        output1 = test_model(x )
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5 ) )
        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook )
        output1 = test_model(x )
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5 ) )
        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook() )
        add_hook_to_module(test_model, test_hook )
        output2 = test_model(x )
        assert torch.allclose(output2, output + 2, atol=1e-5 )
    def test_no_grad_in_hook(self ):
        test_model = ModelForTest()
        x = torch.randn(2, 3 )
        output = test_model(x )
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook )
        output1 = test_model(x )
        self.assertTrue(torch.allclose(output1, output + 1 ) )
        self.assertTrue(output1.requires_grad )
        test_hook.no_grad = True
        output1 = test_model(x )
        self.assertFalse(output1.requires_grad )
    @require_multi_gpu
    def test_align_devices_as_model_parallelism(self ):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
        self.assertEqual(model.linear2.weight.device, torch.device("""cpu""" ) )
        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0 ) )
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0 ) )
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1 ) )
        self.assertEqual(model.linear1.weight.device, torch.device(0 ) )
        self.assertEqual(model.batchnorm.weight.device, torch.device(0 ) )
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0 ) )
        self.assertEqual(model.linear2.weight.device, torch.device(1 ) )
        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3 )
        output = model(x )
        self.assertEqual(output.device, torch.device(1 ) )
        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True ) )
        x = torch.randn(2, 3 ).to(0 )
        output = model(x )
        self.assertEqual(output.device, torch.device(0 ) )
    def test_align_devices_as_cpu_offload(self ):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
        self.assertEqual(model.linear2.weight.device, torch.device("""cpu""" ) )
        # This will move each submodule on different devices
        hook_kwargs = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs ) )
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs ) )
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs ) )
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.weight.device, torch.device("""meta""" ) )
        self.assertEqual(model.linear2.weight.device, torch.device("""meta""" ) )
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["""execution_device"""] )
        self.assertEqual(model.batchnorm.running_mean.device, device )
        x = torch.randn(2, 3 )
        output = model(x )
        self.assertEqual(output.device, device )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1 )
        remove_hook_from_module(model.batchnorm )
        remove_hook_from_module(model.linear2 )
        self.assertEqual(model.linear1.weight.device, torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
        self.assertEqual(model.linear2.weight.device, torch.device("""cpu""" ) )
        # Now test with buffers included in the offload
        hook_kwargs = {
            """execution_device""": 0 if torch.cuda.is_available() else """cpu""",
            """offload""": True,
            """offload_buffers""": True,
        }
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs ) )
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs ) )
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs ) )
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.weight.device, torch.device("""meta""" ) )
        self.assertEqual(model.linear2.weight.device, torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("""meta""" ) )
        x = torch.randn(2, 3 )
        output = model(x )
        self.assertEqual(output.device, device )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1 )
        remove_hook_from_module(model.batchnorm )
        remove_hook_from_module(model.linear2 )
        self.assertEqual(model.linear1.weight.device, torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
        self.assertEqual(model.linear2.weight.device, torch.device("""cpu""" ) )
    def test_attach_align_device_hook_as_cpu_offload(self ):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
        self.assertEqual(model.linear2.weight.device, torch.device("""cpu""" ) )
        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else """cpu"""
        attach_align_device_hook(model, execution_device=execution_device, offload=True )
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.weight.device, torch.device("""meta""" ) )
        self.assertEqual(model.linear2.weight.device, torch.device("""meta""" ) )
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device )
        self.assertEqual(model.batchnorm.running_mean.device, device )
        x = torch.randn(2, 3 )
        output = model(x )
        self.assertEqual(output.device, device )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model )
        self.assertEqual(model.linear1.weight.device, torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
        self.assertEqual(model.linear2.weight.device, torch.device("""cpu""" ) )
        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True )
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.weight.device, torch.device("""meta""" ) )
        self.assertEqual(model.linear2.weight.device, torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("""meta""" ) )
        x = torch.randn(2, 3 )
        output = model(x )
        self.assertEqual(output.device, device )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model )
        self.assertEqual(model.linear1.weight.device, torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
        self.assertEqual(model.linear2.weight.device, torch.device("""cpu""" ) )
    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self ):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
        self.assertEqual(model.linear2.weight.device, torch.device("""cpu""" ) )
        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else """cpu"""
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict() )
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.weight.device, torch.device("""meta""" ) )
        self.assertEqual(model.linear2.weight.device, torch.device("""meta""" ) )
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device )
        self.assertEqual(model.batchnorm.running_mean.device, device )
        x = torch.randn(2, 3 )
        output = model(x )
        self.assertEqual(output.device, device )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model )
        self.assertEqual(model.linear1.weight.device, torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
        self.assertEqual(model.linear2.weight.device, torch.device("""cpu""" ) )
        # Now test with buffers included in the offload
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict(), offload_buffers=True, )
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.weight.device, torch.device("""meta""" ) )
        self.assertEqual(model.linear2.weight.device, torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("""meta""" ) )
        x = torch.randn(2, 3 )
        output = model(x )
        self.assertEqual(output.device, device )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model )
        self.assertEqual(model.linear1.weight.device, torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
        self.assertEqual(model.linear2.weight.device, torch.device("""cpu""" ) )
| 115 | 0 |
def solution(limit: int = 1_00_00_00 ) -> int:
    """simple docstring"""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for inputa in range(2 , limit ):
        counter = 0
        number = inputa
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if inputa not in counters:
            counters[inputa] = counter
        if counter > pre_counter:
            largest_number = inputa
            pre_counter = counter
    return largest_number
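# Worked example of the memoised chain walk (verified by hand): starting at 13 the
# sequence is 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1, so once any of
# these numbers is cached in `counters`, later starting points reuse its chain
# length instead of replaying the tail of the sequence.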
if __name__ == "__main__":
print(solution(int(input().strip())))
| 219 |
import warnings
from ..trainer import Trainer
from ..utils import logging
__lowerCamelCase : List[Any] = logging.get_logger(__name__)
class __snake_case ( lowerCamelCase_ ):
    def __init__( self : Tuple , args : Optional[int]=None , **kwargs : List[Any] ):
        """simple docstring"""
        warnings.warn(
            """`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
            """instead.""" , FutureWarning , )
        super().__init__(args=args , **kwargs )
| 219 | 1 |
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester( unittest.TestCase , ToolTesterMixin ):
    '''simple docstring'''
    def setUp(self ) -> Optional[Any]:
        """simple docstring"""
        self.tool = load_tool('''text-to-speech''' )
        self.tool.setup()
    def test_exact_match_arg(self ) -> List[Any]:
        """simple docstring"""
        torch.manual_seed(0 )
        result = self.tool('''hey''' )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] ,torch.tensor([-0.000_5966_6688_3211_5829, -0.000_3657_6401_9079_5064, -0.0001_3439_5027_9988_3485] ) ,) )
    def test_exact_match_kwarg(self ) -> Tuple:
        """simple docstring"""
        torch.manual_seed(0 )
        result = self.tool('''hey''' )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] ,torch.tensor([-0.000_5966_6688_3211_5829, -0.000_3657_6401_9079_5064, -0.0001_3439_5027_9988_3485] ) ,) )
| 94 |
def is_pangram( input_str: str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    '''simple docstring'''
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(''' ''' ,'''''')
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26
def is_pangram_faster( input_str: str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    '''simple docstring'''
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char ) - 97] = True
        elif char.isupper():
            flag[ord(char ) - 65] = True
    return all(flag)
def is_pangram_fastest( input_str: str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    '''simple docstring'''
    return len({char for char in input_str.lower() if char.isalpha()}) == 26
def benchmark() -> None:
    '''simple docstring'''
    from timeit import timeit
    setup = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'''
    print(timeit('''is_pangram()''' ,setup=setup))
    print(timeit('''is_pangram_faster()''' ,setup=setup))
    print(timeit('''is_pangram_fastest()''' ,setup=setup))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 94 | 1 |
"""simple docstring"""
def solution(pence: int = 200 ) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin , pence + 1 , 1 ):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
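# Worked example (verified by hand) for pence=3 with coins [1, 2]:
# after coin 1, number_of_ways == [1, 1, 1, 1]; after coin 2 it becomes
# [1, 1, 2, 2], i.e. 3 = 1+1+1 or 1+2, matching number_of_ways[3] == 2.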
if __name__ == "__main__":
assert solution(200) == 73_682
| 301 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_ = {
'''configuration_roberta''': ['''ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RobertaConfig''', '''RobertaOnnxConfig'''],
'''tokenization_roberta''': ['''RobertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    SCREAMING_SNAKE_CASE_['''tokenization_roberta_fast'''] = ['''RobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    SCREAMING_SNAKE_CASE_['''modeling_roberta'''] = [
'''ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaForCausalLM''',
'''RobertaForMaskedLM''',
'''RobertaForMultipleChoice''',
'''RobertaForQuestionAnswering''',
'''RobertaForSequenceClassification''',
'''RobertaForTokenClassification''',
'''RobertaModel''',
'''RobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    SCREAMING_SNAKE_CASE_['''modeling_tf_roberta'''] = [
'''TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaForCausalLM''',
'''TFRobertaForMaskedLM''',
'''TFRobertaForMultipleChoice''',
'''TFRobertaForQuestionAnswering''',
'''TFRobertaForSequenceClassification''',
'''TFRobertaForTokenClassification''',
'''TFRobertaMainLayer''',
'''TFRobertaModel''',
'''TFRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    SCREAMING_SNAKE_CASE_['''modeling_flax_roberta'''] = [
'''FlaxRobertaForCausalLM''',
'''FlaxRobertaForMaskedLM''',
'''FlaxRobertaForMultipleChoice''',
'''FlaxRobertaForQuestionAnswering''',
'''FlaxRobertaForSequenceClassification''',
'''FlaxRobertaForTokenClassification''',
'''FlaxRobertaModel''',
'''FlaxRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 301 | 1 |
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))
def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    # x * sigmoid(1.702 * x): the sigmoid approximation of GELU
    # (the original function name was lost; this one is restored descriptively)
    return vector * sigmoid(1.702 * vector)
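# Example (verified by hand): sigmoid(0) == 0.5, so the scaled unit maps 0 to 0.
#   sigmoid(np.array([0.0]))             -> array([0.5])
#   sigmoid_linear_unit(np.array([0.0])) -> array([0.])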
if __name__ == "__main__":
import doctest
doctest.testmod()
| 180 |
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__( self , parent , batch_size=2 , image_size=32 , patch_size=16 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=4 , backbone_out_indices=[0, 1, 2, 3] , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , initializer_range=0.0_2 , num_labels=3 , backbone_featmap_shape=[1, 384, 24, 24] , is_hybrid=True , scope=None , ) -> Any:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self ) -> Union[str, Any]:
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ) -> List[Any]:
        '''simple docstring'''
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }
        return DPTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=backbone_config , backbone_featmap_shape=self.backbone_featmap_shape , )
    def create_and_check_model( self , config , pixel_values , labels ) -> str:
        '''simple docstring'''
        model = DPTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_depth_estimation( self , config , pixel_values , labels ) -> Optional[int]:
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
    def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels ) -> Dict:
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
    def prepare_config_and_inputs_for_common( self ) -> Optional[Any]:
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            """depth-estimation""": DPTForDepthEstimation,
            """feature-extraction""": DPTModel,
            """image-segmentation""": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ) -> Dict:
        '''simple docstring'''
        self.model_tester = DPTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DPTConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ) -> Dict:
        '''simple docstring'''
        self.config_tester.run_common_tests()
@unittest.skip(reason="DPT does not use inputs_embeds" )
    def test_inputs_embeds( self ) -> int:
'''simple docstring'''
pass
    def test_model_common_attributes( self ) -> Optional[Any]:
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ) -> int:
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ) -> str:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_depth_estimation( self ) -> str:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs )
    def test_for_semantic_segmentation( self ) -> Union[str, Any]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
    def test_training( self ) -> Dict:
        '''simple docstring'''
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_training_gradient_checkpointing( self ) -> List[str]:
        '''simple docstring'''
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING ) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config )
            model.to(torch_device )
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_initialization( self ) -> Optional[Any]:
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [F'''{name}.{key}''' for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def test_model_is_small( self ) -> List[Any]:
'''simple docstring'''
pass
@slow
    def test_model_from_pretrained( self ) -> Tuple:
        '''simple docstring'''
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_raise_readout_type( self ) -> int:
        '''simple docstring'''
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError ):
            _ = DPTForDepthEstimation(config )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest( unittest.TestCase ):
    def test_inference_depth_estimation( self ) -> List[str]:
        '''simple docstring'''
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas" )
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas" ).to(torch_device )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        predicted_depth = outputs.predicted_depth
        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384) )
        self.assertEqual(predicted_depth.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , expected_slice , atol=1e-4 ) )
| 180 | 1 |
def logical_left_shift( number , shift_amount ) -> str:
    """simple docstring"""
    if number < 0 or shift_amount < 0:
        raise ValueError('''both inputs must be positive integers''' )
    binary_number = str(bin(number ) )
    binary_number += "0" * shift_amount
    return binary_number
def logical_right_shift( number , shift_amount ) -> str:
    """simple docstring"""
    if number < 0 or shift_amount < 0:
        raise ValueError('''both inputs must be positive integers''' )
    binary_number = str(bin(number ) )[2:]
    if shift_amount >= len(binary_number ):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number ) - shift_amount]
    return "0b" + shifted_binary_number
def arithmetic_right_shift( number , shift_amount ) -> str:
    """simple docstring"""
    if number >= 0:  # Get binary representation of positive number
        binary_number = '''0''' + str(bin(number ) ).strip('''-''' )[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number )[3:] )  # Find 2's complement of number
        binary_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
        binary_number = (
            '''1''' + '''0''' * (binary_number_length - len(binary_number )) + binary_number
        )
    if shift_amount >= len(binary_number ):
        return "0b" + binary_number[0] * len(binary_number )
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number ) - shift_amount]
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
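# Examples (verified by hand):
#   logical_left_shift(5, 2)      -> '0b10100'  (5 << 2 == 20)
#   logical_right_shift(20, 2)    -> '0b101'    (20 >> 2 == 5)
#   arithmetic_right_shift(-8, 2) -> '0b11110'  (5-bit two's complement of -2;
#                                                the sign bit is replicated)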
| 337 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase__ : str = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    lowerCAmelCase__['''modeling_roc_bert'''] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
lowerCAmelCase__ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 | 0 |
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
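# The assertions above pin down the contract of `is_safetensors_compatible`: every
# ".bin" weight needs a ".safetensors" counterpart, where transformers components map
# "pytorch_model*.bin" to "model*.safetensors" and an optional ".<variant>" infix
# (e.g. ".fp16") may appear in both names or in neither. A minimal sketch of that
# predicate (an illustration, not the diffusers implementation) that satisfies all
# of the cases tested above:
import os


def _sketch_is_safetensors_compatible(filenames, variant=None):
    pt_filenames = [f for f in filenames if f.endswith(".bin")]
    sf_filenames = {f for f in filenames if f.endswith(".safetensors")}
    for pt_file in pt_filenames:
        path, name = os.path.split(pt_file)
        stem = name[: -len(".bin")]
        # transformers components store PT weights as pytorch_model*.bin but
        # safetensors weights as model*.safetensors
        if stem == "pytorch_model":
            stem = "model"
        elif variant is not None and stem == f"pytorch_model.{variant}":
            stem = f"model.{variant}"
        if os.path.join(path, f"{stem}.safetensors") not in sf_filenames:
            return False
    return True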
| 360 |
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : List[str] = logging.get_logger(__name__)
a : Tuple = {
'google/efficientnet-b7': 'https://huggingface.co/google/efficientnet-b7/resolve/main/config.json',
}
class EfficientNetConfig( PretrainedConfig ):
    model_type = "efficientnet"
    def __init__( self , num_channels: int = 3 , image_size: int = 600 , width_coefficient: float = 2.0 , depth_coefficient: float = 3.1 , depth_divisor: int = 8 , kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3] , in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192] , out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320] , depthwise_padding: List[int] = [] , strides: List[int] = [1, 2, 2, 2, 1, 2, 1] , num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1] , expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6] , squeeze_expansion_ratio: float = 0.25 , hidden_act: str = "swish" , hidden_dim: int = 2560 , pooling_type: str = "mean" , initializer_range: float = 0.02 , batch_norm_eps: float = 0.001 , batch_norm_momentum: float = 0.99 , dropout_rate: float = 0.5 , drop_connect_rate: float = 0.2 , **kwargs , ):
        super().__init__(**kwargs )

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats ) * 4
class EfficientNetOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("1.11" )

    @property
    def inputs( self ):
        """simple docstring"""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )

    @property
    def atol_for_validation( self ):
        """simple docstring"""
        return 1e-5
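# Minimal usage sketch for the two classes above (mirrors how transformers wires an
# ONNX config to a model config):
#
#   config = EfficientNetConfig(image_size=600)
#   onnx_config = EfficientNetOnnxConfig(config)
#   onnx_config.inputs                # OrderedDict: pixel_values -> batch/num_channels/height/width axes
#   onnx_config.atol_for_validation   # 1e-5, the tolerance used when validating the export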
| 72 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , projection_dim=0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )

        config = BertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        config = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_dpr_context_encoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDPRContextEncoder(config=config )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )

    def create_and_check_dpr_question_encoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDPRQuestionEncoder(config=config )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )

    def create_and_check_dpr_reader( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDPRReader(config=config )
        result = model(input_ids , attention_mask=input_mask )

        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFDPRModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DPRConfig , hidden_size=37 )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs )

    def test_dpr_question_encoder_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs )

    def test_dpr_reader_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name )
            self.assertIsNotNone(model )

        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name )
            self.assertIsNotNone(model )

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name )
            self.assertIsNotNone(model )

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
class TFDPRModelIntegrationTest( unittest.TestCase ):
    """simple docstring"""
@slow
    def test_inference_no_head( self ):
        model = TFDPRQuestionEncoder.from_pretrained('facebook/dpr-question_encoder-single-nq-base' )

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]] )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids )[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1e-4 ) )
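# The pooler_output checked above is DPR's retrieval embedding. A typical use is a
# dot-product relevance score between the question and context encoders (sketch;
# checkpoint ids are the public DPR single-NQ models):
#
#   question_emb = model(input_ids)[0]                                   # (1, 768)
#   ctx_encoder = TFDPRContextEncoder.from_pretrained(
#       "facebook/dpr-ctx_encoder-single-nq-base")
#   passage_emb = ctx_encoder(passage_ids)[0]                            # (1, 768)
#   score = tf.matmul(question_emb, passage_emb, transpose_b=True)       # higher = more relevant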
| 280 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer" )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_rust_tokenizer( self , **kwargs ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return BloomTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def test_encodings_from_sample_data( self ):
        '''simple docstring'''
        tokenizer = self.get_rust_tokenizer()
        input_sentences = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        target_token_ids = [[2_175, 23_714, 73_173, 144_252, 2], [77, 132_619, 3_478, 368, 109_586, 35_433, 2]]
        computed_token_ids = tokenizer.batch_encode_plus(input_sentences )["input_ids"]
        self.assertListEqual(target_token_ids , computed_token_ids )

        decoded_sentences = tokenizer.batch_decode(computed_token_ids )
        self.assertListEqual(decoded_sentences , input_sentences )
    def test_padding( self , max_length=6 ):
        '''simple docstring'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                try:
                    tokenizer_r.encode(s , max_length=max_length )
                    tokenizer_r.encode_plus(s , max_length=max_length )

                    tokenizer_r.batch_encode_plus(s2 , max_length=max_length )
                    tokenizer_r.encode(p , max_length=max_length )
                    tokenizer_r.batch_encode_plus(p2 , max_length=max_length )
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding" )

                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding="max_length" )

                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding="max_length" )

                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , s2 , max_length=max_length , padding="max_length" , )

                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding="max_length" )

                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding="max_length" )

                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , p2 , max_length=max_length , padding="max_length" , )
    def test_encodings_from_xnli_dataset( self ):
        '''simple docstring'''
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli" , "all_languages" , split="test" , streaming=True )

        sample_data = next(iter(ds ) )["premise"]  # pick up one data
        input_text = list(sample_data.values() )

        output_tokens = list(map(tokenizer.encode , input_text ) )
        predicted_text = [tokenizer.decode(x , clean_up_tokenization_spaces=False ) for x in output_tokens]
        self.assertListEqual(predicted_text , input_text )
    def test_pretrained_vocab_files_map( self ):
'''simple docstring'''
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 115 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
def prime_sieve ( num: int ) -> list[int]:
    """simple docstring"""

    if num <= 0:
        message = f'{num}: Invalid input, please enter a positive integer.'
        raise ValueError(message )

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num ) )

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start )

            # Set multiples of start be False
            for i in range(start * start , num + 1 , start ):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1 , num + 1 ):
        if sieve[j] is True:
            prime.append(j )

    return prime
if __name__ == "__main__":
print(prime_sieve(int(input('Enter a positive integer: ').strip())))
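# Worked example (consistent with the sieve above):
#   prime_sieve(25) -> [2, 3, 5, 7, 11, 13, 17, 19, 23]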
| 187 |
'''simple docstring'''
from __future__ import annotations
def is_palindrome ( n: int | str ) -> bool:
    """simple docstring"""
    n = str(n )
    return n == n[::-1]


def solution ( n: int = 1_000_000 ) -> int:
    """simple docstring"""
    total = 0
    for i in range(1 , n ):
        if is_palindrome(i ) and is_palindrome(bin(i ).split("b" )[1] ):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
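# Worked example: 585 is counted because it is a palindrome in both bases,
# str(585) -> "585" and bin(585) -> "0b1001001001".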
| 187 | 1 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SpeechToImagePipeline( DiffusionPipeline ):
    def __init__( self , speech_model , speech_processor , vae , text_encoder , tokenizer , unet , scheduler , safety_checker , feature_extractor , ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                F'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
                ''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
                ''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
                ''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
                ''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
                ''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )

        self.register_modules(
            speech_model=speech_model , speech_processor=speech_processor , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , feature_extractor=feature_extractor , )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase = "auto" ):
if slice_size == "auto":
a :List[str] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
self.enable_attention_slicing(_lowerCamelCase )
@torch.no_grad()
def __call__( self , _lowerCamelCase , _lowerCamelCase=1_6000 , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ):
a :Union[str, Any] = self.speech_processor.feature_extractor(
_lowerCamelCase , return_tensors='''pt''' , sampling_rate=_lowerCamelCase ).input_features.to(self.device )
a :Optional[Any] = self.speech_model.generate(_lowerCamelCase , max_length=48_0000 )
a :List[Any] = self.speech_processor.tokenizer.batch_decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase , normalize=_lowerCamelCase )[
0
]
if isinstance(_lowerCamelCase , _lowerCamelCase ):
a :Any = 1
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
a :Tuple = len(_lowerCamelCase )
else:
raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(_lowerCamelCase )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_lowerCamelCase , _lowerCamelCase ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(_lowerCamelCase )}.''' )
# get prompt text embeddings
a :Tuple = self.tokenizer(
_lowerCamelCase , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
a :str = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
a :List[str] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
a :List[Any] = text_input_ids[:, : self.tokenizer.model_max_length]
a :int = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed , seq_len , _ = text_embeddings.shape
a :int = text_embeddings.repeat(1 , _lowerCamelCase , 1 )
a :Dict = text_embeddings.view(bs_embed * num_images_per_prompt , _lowerCamelCase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
a :List[Any] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
a :List[str]
if negative_prompt is None:
a :int = [''''''] * batch_size
elif type(_lowerCamelCase ) is not type(_lowerCamelCase ):
raise TypeError(
F'''`negative_prompt` should be the same type to `prompt`, but got {type(_lowerCamelCase )} !='''
F''' {type(_lowerCamelCase )}.''' )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
a :str = [negative_prompt]
elif batch_size != len(_lowerCamelCase ):
raise ValueError(
F'''`negative_prompt`: {negative_prompt} has batch size {len(_lowerCamelCase )}, but `prompt`:'''
F''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
''' the batch size of `prompt`.''' )
else:
a :str = negative_prompt
a :List[Any] = text_input_ids.shape[-1]
a :Optional[Any] = self.tokenizer(
_lowerCamelCase , padding='''max_length''' , max_length=_lowerCamelCase , truncation=_lowerCamelCase , return_tensors='''pt''' , )
a :Optional[int] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
a :List[str] = uncond_embeddings.shape[1]
a :Any = uncond_embeddings.repeat(1 , _lowerCamelCase , 1 )
a :Optional[int] = uncond_embeddings.view(batch_size * num_images_per_prompt , _lowerCamelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
a :Optional[int] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
a :List[str] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
a :Any = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
a :int = torch.randn(_lowerCamelCase , generator=_lowerCamelCase , device='''cpu''' , dtype=_lowerCamelCase ).to(
self.device )
else:
a :Optional[Any] = torch.randn(_lowerCamelCase , generator=_lowerCamelCase , device=self.device , dtype=_lowerCamelCase )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
a :Optional[Any] = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(_lowerCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
a :int = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
a :Optional[int] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
a :List[str] = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
a :Union[str, Any] = {}
if accepts_eta:
a :str = eta
for i, t in enumerate(self.progress_bar(_lowerCamelCase ) ):
# expand the latents if we are doing classifier free guidance
a :str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
a :str = self.scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase )
# predict the noise residual
a :List[str] = self.unet(_lowerCamelCase , _lowerCamelCase , encoder_hidden_states=_lowerCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
a :Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
a :int = self.scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
a :Any = 1 / 0.1_8215 * latents
a :Tuple = self.vae.decode(_lowerCamelCase ).sample
a :Tuple = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
a :Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
a :Dict = self.numpy_to_pil(_lowerCamelCase )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=_lowerCamelCase , nsfw_content_detected=_lowerCamelCase )
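# Usage sketch (community pipeline; checkpoint ids below are illustrative assumptions
# following the diffusers community examples):
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4",
#       custom_pipeline="speech_to_image_diffusion",
#       speech_model=WhisperForConditionalGeneration.from_pretrained("openai/whisper-small"),
#       speech_processor=WhisperProcessor.from_pretrained("openai/whisper-small"),
#   )
#   image = pipe(raw_audio_array, sampling_rate=16_000).images[0]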
| 94 |
def euclidean_gcd ( a: int , b: int ) -> int:
    """simple docstring"""
    while b:
        a , b = b, a % b
    return a


def euclidean_gcd_recursive ( a: int , b: int ) -> int:
    """simple docstring"""
    return a if b == 0 else euclidean_gcd_recursive(b , a % b )


def main ( ) -> None:
"""simple docstring"""
print(F'''euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}''' )
print(F'''euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}''' )
print(F'''euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}''' )
print(F'''euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}''' )
print(F'''euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}''' )
print(F'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}''' )
print(F'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}''' )
print(F'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}''' )
if __name__ == "__main__":
main()
| 94 | 1 |
"""simple docstring"""
from typing import Any
class Node:
    def __init__( self , data ) -> None:
        '''simple docstring'''
        self.data = data
        self.next = None

    def __repr__( self ) -> str:
        '''simple docstring'''
        return F"Node({self.data})"


class LinkedList:
    def __init__( self ) -> None:
        '''simple docstring'''
        self.head = None

    def __iter__( self ) -> Any:
        '''simple docstring'''
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__( self ) -> int:
        '''simple docstring'''
        return sum(1 for _ in self )

    def __repr__( self ) -> str:
        '''simple docstring'''
        return "->".join([str(item ) for item in self] )

    def __getitem__( self , index ) -> Any:
        '''simple docstring'''
        if not 0 <= index < len(self ):
            raise ValueError("list index out of range." )
        for i, node in enumerate(self ):
            if i == index:
                return node
        return None

    def __setitem__( self , index , data ) -> None:
        '''simple docstring'''
        if not 0 <= index < len(self ):
            raise ValueError("list index out of range." )
        current = self.head
        for _ in range(index ):
            current = current.next
        current.data = data

    def insert_tail( self , data ) -> None:
        '''simple docstring'''
        self.insert_nth(len(self ) , data )

    def insert_head( self , data ) -> None:
        '''simple docstring'''
        self.insert_nth(0 , data )

    def insert_nth( self , index , data ) -> None:
        '''simple docstring'''
        if not 0 <= index <= len(self ):
            raise IndexError("list index out of range" )
        new_node = Node(data )
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list( self ) -> None:  # print every node data
        '''simple docstring'''
        print(self )

    def delete_head( self ) -> Any:
        '''simple docstring'''
        return self.delete_nth(0 )

    def delete_tail( self ) -> Any:  # delete from tail
        '''simple docstring'''
        return self.delete_nth(len(self ) - 1 )

    def delete_nth( self , index = 0 ) -> Any:
        '''simple docstring'''
        if not 0 <= index <= len(self ) - 1:  # test if index is valid
            raise IndexError("List index out of range." )
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty( self ) -> bool:
        '''simple docstring'''
        return self.head is None

    def reverse( self ) -> None:
        '''simple docstring'''
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list( ) -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list ) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10 ):
        assert len(linked_list ) == i
        linked_list.insert_nth(i , i + 1 )
    assert str(linked_list ) == "->".join(str(i ) for i in range(1 , 11 ) )

    linked_list.insert_head(0 )
    linked_list.insert_tail(11 )
    assert str(linked_list ) == "->".join(str(i ) for i in range(0 , 12 ) )

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9 ) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list ) == 9
    assert str(linked_list ) == "->".join(str(i ) for i in range(1 , 10 ) )

    assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True

    for i in range(0 , 9 ):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True

    linked_list.reverse()
    assert str(linked_list ) == "->".join(str(i ) for i in range(-8 , 1 ) )
def test_singly_linked_list_2( ) -> None:
    test_input = [
        -9,
        100,
        Node(77345112 ),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10 ),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i )

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10 )
    assert result is None
    assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!" ) )
    assert (
        str(linked_list )
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None )
    assert (
        str(linked_list )
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list )
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main( ):
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head " ).strip() )
    linked_list.insert_head(input("Inserting 2nd at head " ).strip() )
    print("\nPrint list:" )
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail " ).strip() )
    linked_list.insert_tail(input("Inserting 2nd at tail " ).strip() )
    print("\nPrint list:" )
    linked_list.print_list()
    print("\nDelete head" )
    linked_list.delete_head()
    print("Delete tail" )
    linked_list.delete_tail()
    print("\nPrint list:" )
    linked_list.print_list()
    print("\nReverse linked list" )
    linked_list.reverse()
    print("\nPrint list:" )
    linked_list.print_list()
    print("\nString representation of linked list:" )
    print(linked_list )
    print("\nReading/changing Node data using indexing:" )
    print(F"Element at Position 1: {linked_list[1]}" )
    linked_list[1] = input("Enter New Value: " ).strip()
    print("New list:" )
    print(linked_list )
    print(F"length of linked_list is : {len(linked_list )}" )
if __name__ == "__main__":
main()
| 76 |
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class ValueGuidedRLPipeline( DiffusionPipeline ):
    def __init__( self , value_function , unet , scheduler , env , ) -> None:
        '''simple docstring'''
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]
    def normalize( self , x_in , key ):
        '''simple docstring'''
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize( self , x_in , key ):
        '''simple docstring'''
        return x_in * self.stds[key] + self.means[key]

    def to_torch( self , x_in ):
        '''simple docstring'''
        if type(x_in ) is dict:
            return {k: self.to_torch(v ) for k, v in x_in.items()}
        elif torch.is_tensor(x_in ):
            return x_in.to(self.unet.device )
        return torch.tensor(x_in , device=self.unet.device )

    def reset_xa( self , x_in , cond , act_dim ):
        '''simple docstring'''
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in
    def run_diffusion( self , x , conditions , n_guide_steps , scale ):
        '''simple docstring'''
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps ):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,) , i , device=self.unet.device , dtype=torch.long )
            for _ in range(n_guide_steps ):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0 , 2 , 1 ) , timesteps ).sample
                    grad = torch.autograd.grad([y.sum()] , [x] )[0]

                    posterior_variance = self.scheduler._get_variance(i )
                    model_std = torch.exp(0.5 * posterior_variance )
                    grad = model_std * grad

                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_xa(x , conditions , self.action_dim )
            prev_x = self.unet(x.permute(0 , 2 , 1 ) , timesteps ).sample.permute(0 , 2 , 1 )

            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x , i , x , predict_epsilon=False )["prev_sample"]

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_xa(x , conditions , self.action_dim )
            x = self.to_torch(x )
        return x, y
    def __call__( self , obs , batch_size=64 , planning_horizon=32 , n_guide_steps=2 , scale=0.1 ):
        '''simple docstring'''
        obs = self.normalize(obs , "observations" )
        obs = obs[None].repeat(batch_size , axis=0 )

        conditions = {0: self.to_torch(obs )}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        xa = randn_tensor(shape , device=self.unet.device )
        xa = self.reset_xa(xa , conditions , self.action_dim )
        xa = self.to_torch(xa )

        # run the diffusion process
        xa , ya = self.run_diffusion(xa , conditions , n_guide_steps , scale )

        # sort output trajectories by value
        sorted_idx = ya.argsort(0 , descending=True ).squeeze()
        sorted_values = xa[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions , key="actions" )

        # select the action with the highest value
        if ya is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0 , batch_size )

        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
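# Usage sketch (mirrors the diffusers value-guided RL example; the env/checkpoint
# names are illustrative assumptions):
#
#   env = gym.make("hopper-medium-v2")            # a D4RL environment
#   pipeline = ValueGuidedRLPipeline.from_pretrained(
#       "bglick13/hopper-medium-v2-value-function-hor32", env=env)
#   obs = env.reset()
#   action = pipeline(obs, planning_horizon=32)   # one denormalized action to execute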
| 76 | 1 |
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
        # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
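# Worked example (traceable by hand through the recursion above): the longest
# non-decreasing subsequence of [5, 1, 2, 3] is found by dropping the pivot 5,
#   longest_subsequence([5, 1, 2, 3]) -> [1, 2, 3]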
| 180 |
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path: str, albert_config_file: str, pytorch_dump_path: str) -> None:
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(F'''Building PyTorch model from configuration: {config}''')
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--albert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained ALBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
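# Example invocation (the script name and paths are placeholders):
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./albert_model.ckpt \
#       --albert_config_file ./albert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin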
| 180 | 1 |
def mf_knapsack(i: int, wt: list, val: list, j: int):
    """simple docstring"""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j), mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1], )
        f[i][j] = val
    return f[i][j]


def knapsack(w: int, wt: list, val: list, n: int):
    """simple docstring"""
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp


def knapsack_with_example_solution(w: int, wt: list, val: list):
    """simple docstring"""
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            'Both the weights and values vectors must be either lists or tuples')

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            'The number of weights must be the same as the number of values.\n'
            F"""But got {num_items} weights and {len(val)} values"""
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                'All weights must be integers but got weight of '
                F"""type {type(wt[i])} at index {i}"""
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    """simple docstring"""
    # item i belongs to an optimal subset exactly when excluding it changes the
    # optimal value, i.e. dp[i - 1][j] != dp[i][j]
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)
if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print("optimal_value = ", optimal_solution)
print("An optimal subset corresponding to the optimal value", optimal_subset)
| 348 |
import requests
__A = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="
def lowerCamelCase_ ( UpperCamelCase__ : str ) -> None:
"""simple docstring"""
__lowerCamelCase = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['articles'] , 1 ):
print(F"""{i}.) {article['title']}""" )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
| 348 | 1 |
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
_a = logging.get_logger(__name__) # pylint: disable=invalid-name
class PaintByExampleImageEncoder( CLIPPreTrainedModel ):
    def __init__( self , config , proj_size=768 ):
        super().__init__(config )
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config )
        self.mapper = PaintByExampleMapper(config )
        self.final_layer_norm = nn.LayerNorm(config.hidden_size )
        self.proj_out = nn.Linear(config.hidden_size , self.proj_size )

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )

    def forward( self , pixel_values , return_uncond_vector=False ):
        clip_output = self.model(pixel_values=pixel_values )
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None] )
        latent_states = self.final_layer_norm(latent_states )
        latent_states = self.proj_out(latent_states )
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states


class PaintByExampleMapper( nn.Module ):
    def __init__( self , config ):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size , num_heads , hid_size // num_heads , activation_fn='gelu' , attention_bias=True )
                for _ in range(num_layers )
            ] )

    def forward( self , hidden_states ):
        for block in self.blocks:
            hidden_states = block(hidden_states )
        return hidden_states
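# Shape note for the forward pass above: pooler_output is (batch, hidden_size); the
# [:, None] adds a sequence axis, so the encoder returns latent states of shape
# (batch, 1, proj_size), plus the learned (1, 1, proj_size) unconditional vector
# when return_uncond_vector=True.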
| 322 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig( PretrainedConfig ):
    model_type = "unispeech"

    def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , feat_quantizer_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1e-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(512, 512, 512, 512, 512, 512, 512) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(10, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embeddings=128 , num_conv_pos_embedding_groups=16 , do_stable_layer_norm=False , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , num_codevectors_per_group=320 , num_codevector_groups=2 , contrastive_logits_temperature=0.1 , num_negatives=100 , codevector_dim=256 , proj_codevector_dim=256 , diversity_loss_weight=0.1 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , num_ctc_classes=80 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , replace_prob=0.5 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
                ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
                f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
                f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio( self ):
        """simple docstring"""
        return functools.reduce(operator.mul , self.conv_stride , 1 )
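# With the default conv_stride (5, 2, 2, 2, 2, 2, 2), the property above evaluates
# to 5 * 2**6 = 320: the feature extractor emits one frame per 320 input samples,
# i.e. one frame every 20 ms of 16 kHz audio.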
| 72 | 0 |
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
abc1 = [0, 25, 50]
abc2 = [25, 50, 75]
young = fuzz.membership.trimf(X, abc1)
middle_aged = fuzz.membership.trimf(X, abc2)

# Compute the different operations using inbuilt functions.
one = np.ones(75)
zero = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1- min(µA(x))
complement_a = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged))[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
alg_sum = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
alg_product = young * middle_aged
# 7. Bounded Sum = min[1,(µA(x), µB(x))]
bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = min[0,(µA(x), µB(x))]
bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
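# Quick sanity checks (illustrative additions, not part of the original script):
# on a shared universe X, fuzzy union/intersection reduce to pointwise max/min.
assert np.allclose(union, np.maximum(young, middle_aged))
assert np.allclose(intersection, np.minimum(young, middle_aged))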
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("""Young""")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("""Middle aged""")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("""union""")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("""intersection""")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("""complement_a""")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("""difference a/b""")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("""alg_sum""")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("""alg_product""")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("""bdd_sum""")
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("""bdd_difference""")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 65 |
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    if num <= 0:
        msg = f'{num}: Invalid input, please enter a positive integer.'
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False

        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
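# Illustrative: prime_sieve(10) returns [2, 3, 5, 7]. Marking begins at
# start * start because every smaller multiple was already crossed off by a
# smaller prime factor.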
if __name__ == "__main__":
print(prime_sieve(int(input("""Enter a positive integer: """).strip())))
| 65 | 1 |
from manim import *
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def snake_case__ ( self : Optional[int] ):
"""simple docstring"""
snake_case_ = Rectangle(height=0.5 , width=0.5 )
snake_case_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
snake_case_ = [mem.copy() for i in range(6 )]
snake_case_ = [mem.copy() for i in range(6 )]
snake_case_ = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
snake_case_ = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
snake_case_ = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
snake_case_ = Text("CPU" , font_size=24 )
snake_case_ = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__lowercase )
snake_case_ = [mem.copy() for i in range(1 )]
snake_case_ = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
snake_case_ = Text("GPU" , font_size=24 )
snake_case_ = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
gpu.align_to(__lowercase , __lowercase )
gpu.set_x(gpu.get_x() - 1 )
self.add(__lowercase )
snake_case_ = [mem.copy() for i in range(6 )]
snake_case_ = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
snake_case_ = Text("Model" , font_size=24 )
snake_case_ = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
model.move_to([3, -1.0, 0] )
self.play(
Create(__lowercase , run_time=1 ) , Create(__lowercase , run_time=1 ) , Create(__lowercase , run_time=1 ) , )
snake_case_ = MarkupText(
f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM." , font_size=24 , )
snake_case_ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
snake_case_ = MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowercase , run_time=2.5 ) , Write(__lowercase ) , Write(__lowercase ) )
self.add(__lowercase )
snake_case_ = []
snake_case_ = []
snake_case_ = []
for i, rect in enumerate(__lowercase ):
snake_case_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(__lowercase , opacity=0.7 )
cpu_target.move_to(__lowercase )
cpu_target.generate_target()
snake_case_ = 0.46 / 4
snake_case_ = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__lowercase )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=__lowercase , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=__lowercase , buff=0.0 )
cpu_targs.append(__lowercase )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(__lowercase ) )
second_animations.append(MoveToTarget(__lowercase , run_time=1.5 ) )
self.play(*__lowercase )
self.play(*__lowercase )
self.wait()
| 187 |
import numpy
class TwoHiddenLayerNeuralNetwork:
'''simple docstring'''
    def __init__(self , input_array : numpy.ndarray , output_array : numpy.ndarray ):
        """simple docstring"""
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # Random initial weights are assigned.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1] , 4 )
        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(
            4 , 3 )
        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3 , 1 )
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape )
    def feedforward(self ):
        """simple docstring"""
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
        return self.layer_between_second_hidden_layer_and_output
    def back_propagation(self ):
        """simple docstring"""
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
    def train(self , output : numpy.ndarray , iterations : int , give_loss : bool ):
        """simple docstring"""
        for iteration in range(1 , iterations + 1 ):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward() ) )
print(f"Iteration {iteration} Loss: {loss}" )
    def predict(self , input_arr : numpy.ndarray ):
        """simple docstring"""
        self.array = input_arr

        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )

        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )

        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def sigmoid(value ):
    '''simple docstring'''
    return 1 / (1 + numpy.exp(-value ))


def sigmoid_derivative(value ):
    '''simple docstring'''
    return (value) * (1 - (value))
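# Illustrative note: for the logistic sigmoid s(x), the derivative is
# s'(x) = s(x) * (1 - s(x)), which is why sigmoid_derivative expects the
# already-computed activation value rather than the raw pre-activation input.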
def example():
    '''simple docstring'''
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ) , dtype=numpy.float64 , )

    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.float64 )

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(
        input_array=test_input , output_array=output )

    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output , iterations=10 , give_loss=False )
    return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.float64 ) )
if __name__ == "__main__":
example()
| 187 | 1 |
'''simple docstring'''
def cocktail_shaker_sort(unsorted: list ) -> list:
    for i in range(len(unsorted ) - 1 ,0 ,-1 ):
        swapped = False

        for j in range(i ,0 ,-1 ):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        for j in range(i ):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j + 1], unsorted[j] = unsorted[j], unsorted[j + 1]
                swapped = True

        if not swapped:
            break
    return unsorted
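# Illustrative note: cocktail shaker sort is a bidirectional bubble sort. It is
# O(n^2) in the worst case, but the `swapped` early exit makes it O(n) on input
# that is already sorted, e.g. cocktail_shaker_sort([4, 5, 2, 1, 2]) -> [1, 2, 2, 4, 5].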
if __name__ == "__main__":
import doctest
doctest.testmod()
user_input = input('''Enter numbers separated by a comma:\n''').strip()
unsorted = [int(item) for item in user_input.split(''',''')]
print(F"""{cocktail_shaker_sort(unsorted) = }""")
| 371 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : Tuple = logging.get_logger(__name__)
A__ : Optional[int] = {}
class LlamaConfig(PretrainedConfig ):
    model_type = '''llama'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    def __init__( self , vocab_size=32000 , hidden_size=4096 , intermediate_size=11008 , num_hidden_layers=32 , num_attention_heads=32 , num_key_value_heads=None , hidden_act="silu" , max_position_embeddings=2048 , initializer_range=0.0_2 , rms_norm_eps=1e-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , pretraining_tp=1 , tie_word_embeddings=False , rope_scaling=None , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )
    def _rope_scaling_validation(self ):
'''simple docstring'''
if self.rope_scaling is None:
return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                f'''got {self.rope_scaling}''' )
        rope_scaling_type = self.rope_scaling.get('type' , None )
        rope_scaling_factor = self.rope_scaling.get('factor' , None )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
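    # Illustrative example (not part of the original file): a valid value is
    # rope_scaling={"type": "linear", "factor": 2.0}, which rescales RoPE position
    # ids so the model can attend over contexts longer than it was trained on.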
| 0 | 0 |
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i):
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")
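# Illustrative output for a tree containing `sorts/bubble_sort.py` (assumed layout):
#
# ## Sorts
#   * [Bubble Sort](sorts/bubble_sort.py)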
if __name__ == "__main__":
print_directory_md('.')
| 76 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
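    # Note (illustrative): `pred_layer.*` keeps its top-level name because
    # XLMWithLMHeadModel exposes the LM head next to `transformer`, so only the
    # base-model weights receive the `transformer.` prefix above.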
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a_ = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 76 | 1 |
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''

    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self ):
        super().setUp()

        tokenizer = ReformerTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id(self ):
        token = '''<s>'''
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab(self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] , '''<unk>''' )
        self.assertEqual(vocab_keys[1] , '''<s>''' )
        self.assertEqual(vocab_keys[-1] , '''j''' )
        self.assertEqual(len(vocab_keys ) , 10_00 )

    def test_vocab_size(self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
    def test_rust_and_python_full_tokenizers(self ):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = '''I was born in 92000, and this is falsé.'''

        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )

        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
    def test_padding(self , max_length=15 ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )

                # Simple input
                s = '''This is a simple input'''
                s2 = ['''This is a simple input 1''', '''This is a simple input 2''']
                p = ('''This is a simple input''', '''This is a pair''')
                p2 = [
                    ('''This is a simple pair 1''', '''This is a simple pair 2'''),
                    ('''This is a simple pair 1''', '''This is a simple pair 2'''),
                ]

                # Simple input tests
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding='''max_length''' )

                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding='''max_length''' )

                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , s2 , max_length=max_length , padding='''max_length''' , )

                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding='''max_length''' )

                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding='''max_length''' )

                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , p2 , max_length=max_length , padding='''max_length''' , )
def lowerCamelCase_ ( self : Tuple ):
pass
    def test_full_tokenizer(self ):
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB , keep_accents=True )

        tokens = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(tokens , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [2_85, 46, 10, 1_70, 3_82] , )

        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )

        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
    def big_tokenizer(self ):
        return ReformerTokenizer.from_pretrained('''google/reformer-crime-and-punishment''' )
@slow
    def test_tokenization_base_easy_symbols(self ):
        symbols = '''Hello World!'''
        original_tokenizer_encodings = [1_26, 32, 2_62, 1_52, 38, 72, 2_87]

        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenization_base_hard_symbols(self ):
        symbols = (
            '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
            ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
        )
        original_tokenizer_encodings = [
1_08,
2_65,
24,
1_11,
4,
2_58,
1_56,
35,
28,
2_75,
3,
2_59,
2_97,
2_60,
84,
4,
35,
1_10,
44,
8,
2_59,
91,
2_68,
21,
11,
2_09,
2_74,
1_09,
2_66,
2_77,
1_17,
86,
93,
3_15,
2_58,
2_78,
2_58,
2_77,
2_58,
0,
2_58,
2_88,
2_58,
3_19,
2_58,
0,
2_58,
0,
2_58,
0,
2_58,
0,
2_58,
2_87,
2_58,
3_15,
2_58,
2_89,
2_58,
2_78,
99,
2_69,
2_66,
2_62,
8,
2_59,
2_41,
4,
2_17,
2_30,
2_68,
2_66,
55,
1_68,
1_06,
75,
1_93,
2_66,
2_23,
27,
49,
26,
2_82,
25,
2_64,
2_99,
19,
26,
0,
2_58,
2_77,
1_17,
86,
93,
1_76,
1_83,
2_70,
11,
2_62,
42,
61,
2_65,
]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model(self ):
        import torch

        from transformers import ReformerConfig, ReformerModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys() )[:10]
        sequence = ''' '''.join(first_ten_tokens )
        encoded_sequence = self.big_tokenizer.encode_plus(sequence , return_tensors='''pt''' )
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors='''pt''' )

        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence['''input_ids'''].shape
        model = ReformerModel(config )

        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence )
            model(**batch_encoded_sequence )
@slow
def lowerCamelCase_ ( self : Optional[Any] ):
# fmt: off
UpperCAmelCase_ = {'''input_ids''': [[1_08, 2_65, 24, 1_11, 4, 2_58, 1_56, 7, 51, 2_79, 58, 7, 76, 25, 69, 2_78], [1_40, 2_43, 2_64, 1_34, 17, 2_67, 77, 2_63, 22, 2_62, 2_97, 2_58, 3_04, 1_77, 2_79, 2_66, 14, 89, 13, 35, 2_61, 2_99, 2_72, 1_37, 2_75, 2_78]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
UpperCAmelCase_ = [
'''This is a very simple sentence.''',
'''The quick brown fox jumps over the lazy dog.''',
]
self.tokenizer_integration_test_util(
expected_encoding=__snake_case , model_name='''google/reformer-crime-and-punishment''' , revision='''0e6c3decb8211d49bf881013425dc8b0448b3f5a''' , padding=__snake_case , sequences=__snake_case , )
| 354 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True )
class AudioClassification(TaskTemplate ):
'''simple docstring'''
    task: str = field(default='audio-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
    input_schema: ClassVar[Features] = Features({'audio': Audio()} )
    label_schema: ClassVar[Features] = Features({'labels': ClassLabel} )
    audio_column: str = "audio"
    label_column: str = "labels"
    def align_with_features(self , features ):
        if self.label_column not in features:
            raise ValueError(F'Column {self.label_column} is not present in features.' )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(F'Column {self.label_column} is not a ClassLabel.' )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema['labels'] = features[self.label_column]
        task_template.__dict__['label_schema'] = label_schema
        return task_template
@property
    def column_mapping(self ) -> Dict[str, str]:
return {
self.audio_column: "audio",
self.label_column: "labels",
}
| 177 | 0 |
def mf_knapsack(i , wt , val , j ):
    '''simple docstring'''
    global f # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val_ = mf_knapsack(i - 1 , wt , val , j )
        else:
            val_ = max(
                mf_knapsack(i - 1 , wt , val , j ) , mf_knapsack(i - 1 , wt , val , j - wt[i - 1] ) + val[i - 1] , )
        f[i][j] = val_
    return f[i][j]


def knapsack(w , wt , val , n ):
    '''simple docstring'''
    dp = [[0] * (w + 1) for _ in range(n + 1 )]

    for i in range(1 , n + 1 ):
        for w_ in range(1 , w + 1 ):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] )
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp


def knapsack_with_example_solution(w , wt , val ):
    '''simple docstring'''
    if not (isinstance(wt , (list, tuple) ) and isinstance(val , (list, tuple) )):
        raise ValueError(
            'Both the weights and values vectors must be either lists or tuples' )

    num_items = len(wt )
    if num_items != len(val ):
        msg = (
            'The number of weights must be the same as the number of values.\n'
            f'But got {num_items} weights and {len(val )} values'
        )
        raise ValueError(msg )
    for i in range(num_items ):
        if not isinstance(wt[i] , int ):
            msg = (
                'All weights must be integers but got weight of '
                f'type {type(wt[i] )} at index {i}'
            )
            raise TypeError(msg )

    optimal_val, dp_table = knapsack(w , wt , val , num_items )
    example_optional_set: set = set()
    _construct_solution(dp_table , wt , num_items , w , example_optional_set )

    return optimal_val, example_optional_set


def _construct_solution(dp , wt , i , j , optimal_set ):
    '''simple docstring'''
    # Walk the dp table backwards: if dropping item i leaves the value unchanged,
    # item i is not in this optimal subset; otherwise include it and reduce j.
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp , wt , i - 1 , j , optimal_set )
        else:
            optimal_set.add(i )
            _construct_solution(dp , wt , i - 1 , j - wt[i - 1] , optimal_set )
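# Illustrative note: the table above implements the classic 0/1 knapsack recurrence
#   dp[i][w] = max(dp[i-1][w], val[i-1] + dp[i-1][w - wt[i-1]])  if wt[i-1] <= w
#            = dp[i-1][w]                                        otherwise
# and _construct_solution replays it backwards to recover one optimal item set.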
if __name__ == "__main__":
val = [3, 2, 4, 4]
wt = [4, 3, 2, 3]
n = 4
w = 6
f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
optimal_solution , _ = knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
optimal_solution , optimal_subset = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print('''optimal_value = ''', optimal_solution)
print('''An optimal subset corresponding to the optimal value''', optimal_subset)
| 348 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    '''configuration_bloom''': ['''BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BloomConfig''', '''BloomOnnxConfig'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_bloom_fast'''] = ['''BloomTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_bloom'''] = [
'''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BloomForCausalLM''',
'''BloomModel''',
'''BloomPreTrainedModel''',
'''BloomForSequenceClassification''',
'''BloomForTokenClassification''',
'''BloomForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
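# Illustrative note: _LazyModule defers importing the heavy torch-backed submodules
# until an attribute such as `BloomModel` is first accessed, which keeps
# `import transformers` fast even when many model families are installed.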
| 348 | 1 |
def bin_to_octal(bin_string: str ) -> str:
    if not all(char in '01' for char in bin_string ):
        raise ValueError('Non-binary value was passed to the function' )
    if not bin_string:
        raise ValueError('Empty string was passed to the function' )
    oct_string = ''
    # Left-pad so the length is a multiple of 3
    while len(bin_string ) % 3 != 0:
        bin_string = '0' + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string ) )
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group ):
            oct_val += int(2 ** (2 - index) * int(val ) )
        oct_string += str(oct_val )
    return oct_string
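# Illustrative examples: bin_to_octal('1111') -> '17' (padded to 001 111),
# and bin_to_octal('101010101010011') -> '52523'.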
if __name__ == "__main__":
from doctest import testmod
testmod()
| 84 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase ):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self ):
        """simple docstring"""
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ['adapt', 're@@', 'a@@', 'apt', 'c@@', 't', '<unk>']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ['#version: 0.2', 'a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs ):
        """simple docstring"""
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer ):
        """simple docstring"""
        input_text = 'adapt react readapt apt'
        output_text = 'adapt react readapt apt'
        return input_text, output_text

    def test_full_tokenizer(self ):
        """simple docstring"""
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'adapt react readapt apt'
        bpe_tokens = 'adapt re@@ a@@ c@@ t re@@ adapt apt'.split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]

        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
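        # Note (illustrative): 'react' becomes ['re@@', 'a@@', 'c@@', 't'] because the
        # toy merge table only contains the merges needed to assemble 'adapt' into a
        # single token; every other word falls back to character-level pieces.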
| 84 | 1 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
TOKENIZER_CHECKPOINTS = ['bert-base-uncased', 'bert-base-cased']
TINY_MODEL_CHECKPOINT = 'hf-internal-testing/tiny-bert-tf-only'
if is_tf_available():
    class ModelToSave(tf.keras.Model ):
        def __init__(self , tokenizer ):
            """simple docstring"""
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT )
            self.bert = TFAutoModel.from_config(config )

        def call(self , inputs ):
            """simple docstring"""
            tokenized = self.tokenizer(inputs )
            out = self.bert(**tokenized )
            return out["pooler_output"]
@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase ):
    def setUp(self ):
        """simple docstring"""
        super().setUp()

        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint , use_fast_bert_tokenizer=False )
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers ) == len(self.tf_tokenizers )

        self.test_sentences = [
"This is a straightforward English test sentence.",
"This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
"Now we're going to add some Chinese: 一 二 三 一二三",
"And some much more rare Chinese: 齉 堃 齉堃",
"Je vais aussi écrire en français pour tester les accents",
"Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
]
        self.paired_sentences = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
    def test_output_equivalence(self ):
        """simple docstring"""
        for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs , return_tensors="tf" , padding="longest" )
                tf_outputs = tf_tokenizer(test_inputs )

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.int64 ) == tf_outputs[key] ) )
    @slow
    def test_different_pairing_styles(self ):
        """simple docstring"""
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences )
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.int64 ) == separated_outputs[key] ) )
    @slow
    def test_graph_mode(self ):
        """simple docstring"""
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer )
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs )
                compiled_outputs = compiled_tokenizer(test_inputs )
                eager_outputs = tf_tokenizer(test_inputs )

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
    @slow
    def test_saved_model(self ):
        """simple docstring"""
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer )
            test_inputs = tf.convert_to_tensor(self.test_sentences )
            out = model(test_inputs )  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir ) / "saved.model"
                model.save(save_path )
                loaded_model = tf.keras.models.load_model(save_path )
                loaded_output = loaded_model(test_inputs )
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1E-5 )
| 65 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class DecoderOutput(BaseOutput ):
    sample: torch.FloatTensor
class Encoder(nn.Module ):
def __init__(self : Union[str, Any] , __UpperCAmelCase : int=3 , __UpperCAmelCase : Dict=3 , __UpperCAmelCase : Optional[Any]=("DownEncoderBlock2D",) , __UpperCAmelCase : int=(6_4,) , __UpperCAmelCase : Union[str, Any]=2 , __UpperCAmelCase : Any=3_2 , __UpperCAmelCase : str="silu" , __UpperCAmelCase : Any=True , ) -> Dict:
"""simple docstring"""
super().__init__()
UpperCAmelCase__ = layers_per_block
        UpperCAmelCase__ = torch.nn.Conv2d(
__UpperCAmelCase , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
UpperCAmelCase__ = None
UpperCAmelCase__ = nn.ModuleList([] )
# down
UpperCAmelCase__ = block_out_channels[0]
for i, down_block_type in enumerate(__UpperCAmelCase ):
UpperCAmelCase__ = output_channel
UpperCAmelCase__ = block_out_channels[i]
UpperCAmelCase__ = i == len(__UpperCAmelCase ) - 1
UpperCAmelCase__ = get_down_block(
__UpperCAmelCase , num_layers=self.layers_per_block , in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=__UpperCAmelCase , resnet_groups=__UpperCAmelCase , attention_head_dim=__UpperCAmelCase , temb_channels=__UpperCAmelCase , )
self.down_blocks.append(__UpperCAmelCase )
# mid
        UpperCAmelCase__ = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=__UpperCAmelCase , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=__UpperCAmelCase , temb_channels=__UpperCAmelCase , )
# out
UpperCAmelCase__ = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=__UpperCAmelCase , eps=1E-6 )
UpperCAmelCase__ = nn.SiLU()
UpperCAmelCase__ = 2 * out_channels if double_z else out_channels
        UpperCAmelCase__ = nn.Conv2d(block_out_channels[-1] , __UpperCAmelCase , 3 , padding=1 )
UpperCAmelCase__ = False
def lowercase_ (self : List[Any] , __UpperCAmelCase : int ) -> str:
"""simple docstring"""
UpperCAmelCase__ = x
UpperCAmelCase__ = self.conv_in(__UpperCAmelCase )
if self.training and self.gradient_checkpointing:
def create_custom_forward(__UpperCAmelCase : int ):
def custom_forward(*__UpperCAmelCase : Optional[Any] ):
return module(*__UpperCAmelCase )
return custom_forward
# down
if is_torch_version(">=" , "1.11.0" ):
for down_block in self.down_blocks:
UpperCAmelCase__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(__UpperCAmelCase ) , __UpperCAmelCase , use_reentrant=__UpperCAmelCase )
# middle
UpperCAmelCase__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __UpperCAmelCase , use_reentrant=__UpperCAmelCase )
else:
for down_block in self.down_blocks:
UpperCAmelCase__ = torch.utils.checkpoint.checkpoint(create_custom_forward(__UpperCAmelCase ) , __UpperCAmelCase )
# middle
UpperCAmelCase__ = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , __UpperCAmelCase )
else:
# down
for down_block in self.down_blocks:
UpperCAmelCase__ = down_block(__UpperCAmelCase )
# middle
UpperCAmelCase__ = self.mid_block(__UpperCAmelCase )
# post-process
UpperCAmelCase__ = self.conv_norm_out(__UpperCAmelCase )
UpperCAmelCase__ = self.conv_act(__UpperCAmelCase )
UpperCAmelCase__ = self.conv_out(__UpperCAmelCase )
return sample
class Decoder(nn.Module ):
def __init__(self : List[Any] , __UpperCAmelCase : str=3 , __UpperCAmelCase : Union[str, Any]=3 , __UpperCAmelCase : Optional[int]=("UpDecoderBlock2D",) , __UpperCAmelCase : str=(6_4,) , __UpperCAmelCase : Optional[Any]=2 , __UpperCAmelCase : Tuple=3_2 , __UpperCAmelCase : Any="silu" , __UpperCAmelCase : Any="group" , ) -> Dict:
"""simple docstring"""
super().__init__()
UpperCAmelCase__ = layers_per_block
        UpperCAmelCase__ = nn.Conv2d(
__UpperCAmelCase , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
UpperCAmelCase__ = None
UpperCAmelCase__ = nn.ModuleList([] )
UpperCAmelCase__ = in_channels if norm_type == "spatial" else None
# mid
        UpperCAmelCase__ = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=__UpperCAmelCase , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=__UpperCAmelCase , temb_channels=__UpperCAmelCase , )
# up
UpperCAmelCase__ = list(reversed(__UpperCAmelCase ) )
UpperCAmelCase__ = reversed_block_out_channels[0]
for i, up_block_type in enumerate(__UpperCAmelCase ):
UpperCAmelCase__ = output_channel
UpperCAmelCase__ = reversed_block_out_channels[i]
UpperCAmelCase__ = i == len(__UpperCAmelCase ) - 1
UpperCAmelCase__ = get_up_block(
__UpperCAmelCase , num_layers=self.layers_per_block + 1 , in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , prev_output_channel=__UpperCAmelCase , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=__UpperCAmelCase , resnet_groups=__UpperCAmelCase , attention_head_dim=__UpperCAmelCase , temb_channels=__UpperCAmelCase , resnet_time_scale_shift=__UpperCAmelCase , )
self.up_blocks.append(__UpperCAmelCase )
UpperCAmelCase__ = output_channel
# out
if norm_type == "spatial":
UpperCAmelCase__ = SpatialNorm(block_out_channels[0] , __UpperCAmelCase )
else:
UpperCAmelCase__ = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=__UpperCAmelCase , eps=1E-6 )
UpperCAmelCase__ = nn.SiLU()
        UpperCAmelCase__ = nn.Conv2d(block_out_channels[0] , __UpperCAmelCase , 3 , padding=1 )
UpperCAmelCase__ = False
def lowercase_ (self : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict=None ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = z
UpperCAmelCase__ = self.conv_in(__UpperCAmelCase )
UpperCAmelCase__ = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(__UpperCAmelCase : str ):
def custom_forward(*__UpperCAmelCase : List[str] ):
return module(*__UpperCAmelCase )
return custom_forward
if is_torch_version(">=" , "1.11.0" ):
# middle
UpperCAmelCase__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __UpperCAmelCase , __UpperCAmelCase , use_reentrant=__UpperCAmelCase )
UpperCAmelCase__ = sample.to(__UpperCAmelCase )
# up
for up_block in self.up_blocks:
UpperCAmelCase__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(__UpperCAmelCase ) , __UpperCAmelCase , __UpperCAmelCase , use_reentrant=__UpperCAmelCase )
else:
# middle
UpperCAmelCase__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __UpperCAmelCase , __UpperCAmelCase )
UpperCAmelCase__ = sample.to(__UpperCAmelCase )
# up
for up_block in self.up_blocks:
UpperCAmelCase__ = torch.utils.checkpoint.checkpoint(create_custom_forward(__UpperCAmelCase ) , __UpperCAmelCase , __UpperCAmelCase )
else:
# middle
UpperCAmelCase__ = self.mid_block(__UpperCAmelCase , __UpperCAmelCase )
UpperCAmelCase__ = sample.to(__UpperCAmelCase )
# up
for up_block in self.up_blocks:
UpperCAmelCase__ = up_block(__UpperCAmelCase , __UpperCAmelCase )
# post-process
if latent_embeds is None:
UpperCAmelCase__ = self.conv_norm_out(__UpperCAmelCase )
else:
UpperCAmelCase__ = self.conv_norm_out(__UpperCAmelCase , __UpperCAmelCase )
UpperCAmelCase__ = self.conv_act(__UpperCAmelCase )
UpperCAmelCase__ = self.conv_out(__UpperCAmelCase )
return sample
class VectorQuantizer(nn.Module ):
def __init__(self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Union[str, Any]="random" , __UpperCAmelCase : Dict=False , __UpperCAmelCase : Union[str, Any]=True ) -> Dict:
"""simple docstring"""
super().__init__()
UpperCAmelCase__ = n_e
UpperCAmelCase__ = vq_embed_dim
UpperCAmelCase__ = beta
UpperCAmelCase__ = legacy
UpperCAmelCase__ = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
UpperCAmelCase__ = remap
if self.remap is not None:
self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
UpperCAmelCase__ = self.used.shape[0]
UpperCAmelCase__ = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
UpperCAmelCase__ = self.re_embed
UpperCAmelCase__ = self.re_embed + 1
print(
f"""Remapping {self.n_e} indices to {self.re_embed} indices. """
f"""Using {self.unknown_index} for unknown indices.""" )
else:
UpperCAmelCase__ = n_e
UpperCAmelCase__ = sane_index_shape
def lowercase_ (self : str , __UpperCAmelCase : str ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = inds.shape
assert len(__UpperCAmelCase ) > 1
UpperCAmelCase__ = inds.reshape(ishape[0] , -1 )
UpperCAmelCase__ = self.used.to(__UpperCAmelCase )
UpperCAmelCase__ = (inds[:, :, None] == used[None, None, ...]).long()
UpperCAmelCase__ = match.argmax(-1 )
UpperCAmelCase__ = match.sum(2 ) < 1
if self.unknown_index == "random":
UpperCAmelCase__ = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
UpperCAmelCase__ = self.unknown_index
return new.reshape(__UpperCAmelCase )
def lowercase_ (self : Tuple , __UpperCAmelCase : Optional[int] ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = inds.shape
assert len(__UpperCAmelCase ) > 1
UpperCAmelCase__ = inds.reshape(ishape[0] , -1 )
UpperCAmelCase__ = self.used.to(__UpperCAmelCase )
if self.re_embed > self.used.shape[0]: # extra token
UpperCAmelCase__ = 0 # simply set to zero
UpperCAmelCase__ = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , __UpperCAmelCase )
return back.reshape(__UpperCAmelCase )
def lowercase_ (self : Optional[Any] , __UpperCAmelCase : Dict ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = z.permute(0 , 2 , 3 , 1 ).contiguous()
UpperCAmelCase__ = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
UpperCAmelCase__ = torch.argmin(torch.cdist(__UpperCAmelCase , self.embedding.weight ) , dim=1 )
UpperCAmelCase__ = self.embedding(__UpperCAmelCase ).view(z.shape )
UpperCAmelCase__ = None
UpperCAmelCase__ = None
# compute loss for embedding
if not self.legacy:
UpperCAmelCase__ = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
UpperCAmelCase__ = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
UpperCAmelCase__ = z + (z_q - z).detach()
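        # (Illustrative note: this is the straight-through estimator — the forward pass
        # uses the quantized z_q while gradients flow back through the continuous z.)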
# reshape back to match original input shape
UpperCAmelCase__ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
UpperCAmelCase__ = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
UpperCAmelCase__ = self.remap_to_used(__UpperCAmelCase )
UpperCAmelCase__ = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
UpperCAmelCase__ = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def lowercase_ (self : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : Optional[Any] ) -> Any:
"""simple docstring"""
if self.remap is not None:
UpperCAmelCase__ = indices.reshape(shape[0] , -1 ) # add batch axis
UpperCAmelCase__ = self.unmap_to_all(__UpperCAmelCase )
UpperCAmelCase__ = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
UpperCAmelCase__ = self.embedding(__UpperCAmelCase )
if shape is not None:
UpperCAmelCase__ = z_q.view(__UpperCAmelCase )
# reshape back to match original input shape
UpperCAmelCase__ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class DiagonalGaussianDistribution(object ):
def __init__(self : Any , __UpperCAmelCase : Dict , __UpperCAmelCase : str=False ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = parameters
UpperCAmelCase__ , UpperCAmelCase__ = torch.chunk(__UpperCAmelCase , 2 , dim=1 )
UpperCAmelCase__ = torch.clamp(self.logvar , -30.0 , 20.0 )
UpperCAmelCase__ = deterministic
UpperCAmelCase__ = torch.exp(0.5 * self.logvar )
UpperCAmelCase__ = torch.exp(self.logvar )
if self.deterministic:
UpperCAmelCase__ = UpperCAmelCase__ = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def lowercase_ (self : Union[str, Any] , __UpperCAmelCase : Optional[torch.Generator] = None ) -> torch.FloatTensor:
"""simple docstring"""
UpperCAmelCase__ = randn_tensor(
self.mean.shape , generator=__UpperCAmelCase , device=self.parameters.device , dtype=self.parameters.dtype )
UpperCAmelCase__ = self.mean + self.std * sample
return x
def lowercase_ (self : str , __UpperCAmelCase : int=None ) -> Any:
"""simple docstring"""
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def lowercase_ (self : Dict , __UpperCAmelCase : Tuple , __UpperCAmelCase : Any=[1, 2, 3] ) -> Dict:
"""simple docstring"""
if self.deterministic:
return torch.Tensor([0.0] )
UpperCAmelCase__ = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=__UpperCAmelCase )
def lowercase_ (self : Tuple ) -> Optional[Any]:
"""simple docstring"""
return self.mean
| 65 | 1 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum ):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


@add_end_docstrings(PIPELINE_INIT_ARGS )
class TextGenerationPipeline(Pipeline ):
    XL_PREFIX = '''
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
'''
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix , **self._forward_params )
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
def lowerCAmelCase ( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ , ) -> Tuple:
_snake_case = {}
if prefix is not None:
_snake_case = prefix
if prefix:
_snake_case = self.tokenizer(
lowerCAmelCase_ , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=self.framework )
_snake_case = prefix_inputs['input_ids'].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
' [None, \'hole\']' )
_snake_case = handle_long_generation
preprocess_params.update(lowerCAmelCase_ )
_snake_case = generate_kwargs
_snake_case = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_full_text`' )
if return_tensors is not None:
raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' )
_snake_case = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_tensors`' )
_snake_case = ReturnType.TENSORS
if return_type is not None:
_snake_case = return_type
if clean_up_tokenization_spaces is not None:
_snake_case = clean_up_tokenization_spaces
if stop_sequence is not None:
_snake_case = self.tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 1:
warnings.warn(
                'Stopping on a multi-token sequence is not yet supported in transformers. The first token of'
                ' the stop sequence will be used as the stop sequence string in the interim.' )
_snake_case = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def lowerCAmelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[str]:
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'add_space_before_punct_symbol': True} )
return super()._parse_and_tokenize(*lowerCAmelCase_ , **lowerCAmelCase_ )
def __call__( self , lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[Any]:
return super().__call__(lowerCAmelCase_ , **lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_="" , lowerCAmelCase_=None , **lowerCAmelCase_ ) -> Any:
_snake_case = self.tokenizer(
prefix + prompt_text , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=self.framework )
_snake_case = prompt_text
if handle_long_generation == "hole":
_snake_case = inputs['input_ids'].shape[-1]
if "max_new_tokens" in generate_kwargs:
_snake_case = generate_kwargs['max_new_tokens']
else:
_snake_case = generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('We cannot infer how many new tokens are expected' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
_snake_case = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
                        'We cannot use `hole` to handle this generation: the number of desired tokens exceeds the'
                        " model's max length" )
_snake_case = inputs['input_ids'][:, -keep_length:]
if "attention_mask" in inputs:
_snake_case = inputs['attention_mask'][:, -keep_length:]
return inputs
def lowerCAmelCase ( self , lowerCAmelCase_ , **lowerCAmelCase_ ) -> Optional[Any]:
_snake_case = model_inputs['input_ids']
_snake_case = model_inputs.get('attention_mask' , lowerCAmelCase_ )
# Allow empty prompts
if input_ids.shape[1] == 0:
_snake_case = None
_snake_case = None
_snake_case = 1
else:
_snake_case = input_ids.shape[0]
_snake_case = model_inputs.pop('prompt_text' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
_snake_case = generate_kwargs.pop('prefix_length' , 0 )
if prefix_length > 0:
_snake_case = 'max_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].max_new_tokens is not None
)
if not has_max_new_tokens:
_snake_case = generate_kwargs.get('max_length' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
_snake_case = 'min_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
_snake_case = self.model.generate(input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = generated_sequence.shape[0]
if self.framework == "pt":
_snake_case = generated_sequence.reshape(lowerCAmelCase_ , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
_snake_case = tf.reshape(lowerCAmelCase_ , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=ReturnType.FULL_TEXT , lowerCAmelCase_=True ) -> int:
_snake_case = model_outputs['generated_sequence'][0]
_snake_case = model_outputs['input_ids']
_snake_case = model_outputs['prompt_text']
_snake_case = generated_sequence.numpy().tolist()
_snake_case = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
_snake_case = {'generated_token_ids': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
_snake_case = self.tokenizer.decode(
lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ , )
                # Remove PADDING prompt from the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
_snake_case = 0
else:
_snake_case = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ , ) )
if return_type == ReturnType.FULL_TEXT:
_snake_case = prompt_text + text[prompt_length:]
else:
_snake_case = text[prompt_length:]
_snake_case = {'generated_text': all_text}
records.append(lowerCAmelCase_ )
return records
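# Minimal usage sketch, assuming the pipeline above mirrors transformers' TextGenerationPipeline
# (the checkpoint name and prompt are placeholders, not part of this file):
#
#     from transformers import pipeline
#     generator = pipeline("text-generation", model="gpt2")
#     outputs = generator("Hello, I'm a language model,", max_new_tokens=20, return_full_text=False)
#     print(outputs[0]["generated_text"])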
| 354 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
UpperCAmelCase_ = logging.get_logger(__name__)
# General docstring
UpperCAmelCase_ = """ResNetConfig"""
# Base docstring
UpperCAmelCase_ = """microsoft/resnet-50"""
UpperCAmelCase_ = [1, 2048, 7, 7]
# Image classification docstring
UpperCAmelCase_ = """microsoft/resnet-50"""
UpperCAmelCase_ = """tiger cat"""
UpperCAmelCase_ = [
"""microsoft/resnet-50""",
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 3 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" ) -> Union[str, Any]:
super().__init__()
_snake_case = nn.Convad(
lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=lowerCAmelCase_ , stride=lowerCAmelCase_ , padding=kernel_size // 2 , bias=lowerCAmelCase_ )
_snake_case = nn.BatchNormad(lowerCAmelCase_ )
_snake_case = ACTaFN[activation] if activation is not None else nn.Identity()
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor:
_snake_case = self.convolution(lowerCAmelCase_ )
_snake_case = self.normalization(lowerCAmelCase_ )
_snake_case = self.activation(lowerCAmelCase_ )
return hidden_state
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ ) -> Dict:
super().__init__()
_snake_case = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
_snake_case = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
_snake_case = config.num_channels
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor:
_snake_case = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
                'Make sure that the channel dimension of the pixel values matches the one set in the configuration.' )
_snake_case = self.embedder(lowerCAmelCase_ )
_snake_case = self.pooler(lowerCAmelCase_ )
return embedding
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 2 ) -> List[Any]:
super().__init__()
_snake_case = nn.Convad(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 , stride=lowerCAmelCase_ , bias=lowerCAmelCase_ )
_snake_case = nn.BatchNormad(lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor:
_snake_case = self.convolution(lowerCAmelCase_ )
_snake_case = self.normalization(lowerCAmelCase_ )
return hidden_state
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" ) -> Any:
super().__init__()
_snake_case = in_channels != out_channels or stride != 1
_snake_case = (
ResNetShortCut(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) if should_apply_shortcut else nn.Identity()
)
_snake_case = nn.Sequential(
ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) , ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , activation=lowerCAmelCase_ ) , )
_snake_case = ACTaFN[activation]
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> str:
_snake_case = hidden_state
_snake_case = self.layer(lowerCAmelCase_ )
_snake_case = self.shortcut(lowerCAmelCase_ )
hidden_state += residual
_snake_case = self.activation(lowerCAmelCase_ )
return hidden_state
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" , lowerCAmelCase_ = 4 ) -> List[str]:
super().__init__()
_snake_case = in_channels != out_channels or stride != 1
_snake_case = out_channels // reduction
_snake_case = (
ResNetShortCut(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) if should_apply_shortcut else nn.Identity()
)
_snake_case = nn.Sequential(
ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 ) , ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) , ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 , activation=lowerCAmelCase_ ) , )
_snake_case = ACTaFN[activation]
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[Any]:
_snake_case = hidden_state
_snake_case = self.layer(lowerCAmelCase_ )
_snake_case = self.shortcut(lowerCAmelCase_ )
hidden_state += residual
_snake_case = self.activation(lowerCAmelCase_ )
return hidden_state
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 2 , ) -> Tuple:
super().__init__()
_snake_case = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
_snake_case = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ , activation=config.hidden_act ) , *[layer(lowerCAmelCase_ , lowerCAmelCase_ , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor:
_snake_case = input
for layer in self.layers:
_snake_case = layer(lowerCAmelCase_ )
return hidden_state
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ ) -> int:
super().__init__()
_snake_case = nn.ModuleList([] )
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
lowerCAmelCase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
_snake_case = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowerCAmelCase_ , config.depths[1:] ):
self.stages.append(ResNetStage(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , depth=lowerCAmelCase_ ) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = False , lowerCAmelCase_ = True ) -> BaseModelOutputWithNoAttention:
_snake_case = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_snake_case = hidden_states + (hidden_state,)
_snake_case = stage_module(lowerCAmelCase_ )
if output_hidden_states:
_snake_case = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=lowerCAmelCase_ , hidden_states=lowerCAmelCase_ , )
class UpperCamelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = ResNetConfig
lowerCAmelCase_ = '''resnet'''
lowerCAmelCase_ = '''pixel_values'''
lowerCAmelCase_ = True
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Dict:
if isinstance(lowerCAmelCase_ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
elif isinstance(lowerCAmelCase_ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=False ) -> List[Any]:
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = value
UpperCAmelCase_ = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
UpperCAmelCase_ = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'''The bare ResNet model outputting raw features without any specific head on top.''' , _lowerCamelCase , )
class UpperCamelCase_ ( _lowerCamelCase ):
def __init__( self , lowerCAmelCase_ ) -> int:
super().__init__(lowerCAmelCase_ )
_snake_case = config
_snake_case = ResNetEmbeddings(lowerCAmelCase_ )
_snake_case = ResNetEncoder(lowerCAmelCase_ )
_snake_case = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None ) -> BaseModelOutputWithPoolingAndNoAttention:
_snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = self.embedder(lowerCAmelCase_ )
_snake_case = self.encoder(
lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ )
_snake_case = encoder_outputs[0]
_snake_case = self.pooler(lowerCAmelCase_ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCAmelCase_ , pooler_output=lowerCAmelCase_ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'''
ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , _lowerCamelCase , )
class UpperCamelCase_ ( _lowerCamelCase ):
def __init__( self , lowerCAmelCase_ ) -> Union[str, Any]:
super().__init__(lowerCAmelCase_ )
_snake_case = config.num_labels
_snake_case = ResNetModel(lowerCAmelCase_ )
# classification head
_snake_case = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCAmelCase ( self , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , ) -> ImageClassifierOutputWithNoAttention:
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = self.resnet(lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ )
_snake_case = outputs.pooler_output if return_dict else outputs[1]
_snake_case = self.classifier(lowerCAmelCase_ )
_snake_case = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_snake_case = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_snake_case = 'single_label_classification'
else:
_snake_case = 'multi_label_classification'
if self.config.problem_type == "regression":
_snake_case = MSELoss()
if self.num_labels == 1:
_snake_case = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_snake_case = loss_fct(lowerCAmelCase_ , lowerCAmelCase_ )
elif self.config.problem_type == "single_label_classification":
_snake_case = CrossEntropyLoss()
_snake_case = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_snake_case = BCEWithLogitsLoss()
_snake_case = loss_fct(lowerCAmelCase_ , lowerCAmelCase_ )
if not return_dict:
_snake_case = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowerCAmelCase_ , logits=lowerCAmelCase_ , hidden_states=outputs.hidden_states )
@add_start_docstrings(
'''
ResNet backbone, to be used with frameworks like DETR and MaskFormer.
''' , _lowerCamelCase , )
class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
def __init__( self , lowerCAmelCase_ ) -> List[Any]:
super().__init__(lowerCAmelCase_ )
super()._init_backbone(lowerCAmelCase_ )
_snake_case = [config.embedding_size] + config.hidden_sizes
_snake_case = ResNetEmbeddings(lowerCAmelCase_ )
_snake_case = ResNetEncoder(lowerCAmelCase_ )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase_ )
@replace_return_docstrings(output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None ) -> BackboneOutput:
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case = self.embedder(lowerCAmelCase_ )
_snake_case = self.encoder(lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ )
_snake_case = outputs.hidden_states
_snake_case = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
_snake_case = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=lowerCAmelCase_ , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=lowerCAmelCase_ , )
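# Minimal usage sketch, assuming the classes above mirror transformers' ResNet model family
# (the checkpoint name and image path are placeholders):
#
#     from PIL import Image
#     from transformers import AutoImageProcessor, ResNetForImageClassification
#     processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#     model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
#     inputs = processor(Image.open("cat.jpg"), return_tensors="pt")
#     predicted_label = model(**inputs).logits.argmax(-1).item()
#     print(model.config.id2label[predicted_label])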
| 295 | 0 |
"""simple docstring"""
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
_a : int = logging.get_logger(__name__)
def normalize_box(box , width , height ) -> list:
    # scale a (left, top, right, bottom) pixel box to the 0-1000 range expected by LayoutLM-style models
    return [
        int(1000 * (box[0] / width) ),
        int(1000 * (box[1] / height) ),
        int(1000 * (box[2] / width) ),
        int(1000 * (box[3] / height) ),
    ]
def apply_tesseract(image: np.ndarray , lang: Optional[str] , tesseract_config: Optional[str] = None ) -> Optional[Any]:
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image )
    image_width , image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image , lang=lang , output_type="""dict""" , config=tesseract_config )
    words , left , top , width , height = data["""text"""], data["""left"""], data["""top"""], data["""width"""], data["""height"""]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words ) if not word.strip()]
    words = [word for idx, word in enumerate(words ) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left ) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top ) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width ) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height ) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left , top , width , height ):
        actual_boxes.append([x, y, x + w, y + h] )

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box , image_width , image_height ) )

    assert len(words ) == len(normalized_boxes ), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
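# Worked example for the helpers above: on a 1000 x 2000 pixel page, the pixel box
# [10, 20, 110, 220] normalizes to [10, 10, 110, 110], i.e.
# [int(1000 * 10 / 1000), int(1000 * 20 / 2000), int(1000 * 110 / 1000), int(1000 * 220 / 2000)].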
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[int] = ["pixel_values"]
def __init__( self , a__ = True , a__ = None , a__ = PILImageResampling.BILINEAR , a__ = True , a__ = None , a__ = "" , **a__ , ):
super().__init__(**a__ )
_lowerCAmelCase : Tuple = size if size is not None else {"""height""": 224, """width""": 224}
_lowerCAmelCase : Dict = get_size_dict(a__ )
_lowerCAmelCase : int = do_resize
_lowerCAmelCase : Optional[int] = size
_lowerCAmelCase : Optional[Any] = resample
_lowerCAmelCase : List[Any] = apply_ocr
_lowerCAmelCase : Dict = ocr_lang
_lowerCAmelCase : Tuple = tesseract_config
def __A ( self , a__ , a__ , a__ = PILImageResampling.BILINEAR , a__ = None , **a__ , ):
_lowerCAmelCase : Dict = get_size_dict(a__ )
if "height" not in size or "width" not in size:
raise ValueError(F"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}" )
_lowerCAmelCase : Optional[int] = (size["""height"""], size["""width"""])
return resize(a__ , size=a__ , resample=a__ , data_format=a__ , **a__ )
def __A ( self , a__ , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = ChannelDimension.FIRST , **a__ , ):
_lowerCAmelCase : str = do_resize if do_resize is not None else self.do_resize
_lowerCAmelCase : str = size if size is not None else self.size
_lowerCAmelCase : Optional[Any] = get_size_dict(a__ )
_lowerCAmelCase : List[Any] = resample if resample is not None else self.resample
_lowerCAmelCase : Any = apply_ocr if apply_ocr is not None else self.apply_ocr
_lowerCAmelCase : Dict = ocr_lang if ocr_lang is not None else self.ocr_lang
_lowerCAmelCase : int = tesseract_config if tesseract_config is not None else self.tesseract_config
_lowerCAmelCase : str = make_list_of_images(a__ )
if not valid_images(a__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
# All transformations expect numpy arrays.
_lowerCAmelCase : Optional[int] = [to_numpy_array(a__ ) for image in images]
if apply_ocr:
requires_backends(self , """pytesseract""" )
_lowerCAmelCase : List[str] = []
_lowerCAmelCase : Optional[Any] = []
for image in images:
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = apply_tesseract(a__ , a__ , a__ )
words_batch.append(a__ )
boxes_batch.append(a__ )
if do_resize:
_lowerCAmelCase : List[Any] = [self.resize(image=a__ , size=a__ , resample=a__ ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
_lowerCAmelCase : Union[str, Any] = [flip_channel_order(a__ ) for image in images]
_lowerCAmelCase : Tuple = [to_channel_dimension_format(a__ , a__ ) for image in images]
_lowerCAmelCase : Any = BatchFeature(data={"""pixel_values""": images} , tensor_type=a__ )
if apply_ocr:
_lowerCAmelCase : Optional[Any] = words_batch
_lowerCAmelCase : Optional[int] = boxes_batch
return data
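# Minimal usage sketch, assuming the class above mirrors transformers' LayoutLMv2ImageProcessor
# (the image path is a placeholder; pytesseract must be installed when apply_ocr=True):
#
#     from PIL import Image
#     from transformers import LayoutLMv2ImageProcessor
#     processor = LayoutLMv2ImageProcessor(apply_ocr=True)
#     encoding = processor(Image.open("form.png"), return_tensors="pt")
#     print(encoding["pixel_values"].shape, encoding["words"], encoding["boxes"])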
| 44 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {}
class lowercase_ ( lowercase ):
'''simple docstring'''
__snake_case = '''llama'''
__snake_case = ['''past_key_values''']
def __init__( self : Optional[Any] , __UpperCAmelCase : Union[str, Any]=32_000 , __UpperCAmelCase : str=4_096 , __UpperCAmelCase : int=11_008 , __UpperCAmelCase : Tuple=32 , __UpperCAmelCase : Optional[int]=32 , __UpperCAmelCase : List[str]=None , __UpperCAmelCase : Union[str, Any]="silu" , __UpperCAmelCase : Tuple=2_048 , __UpperCAmelCase : Optional[Any]=0.02 , __UpperCAmelCase : Any=1e-6 , __UpperCAmelCase : Union[str, Any]=True , __UpperCAmelCase : Optional[int]=0 , __UpperCAmelCase : Optional[int]=1 , __UpperCAmelCase : Optional[int]=2 , __UpperCAmelCase : Tuple=1 , __UpperCAmelCase : List[str]=False , __UpperCAmelCase : Tuple=None , **__UpperCAmelCase : Tuple , ) ->str:
"""simple docstring"""
a = vocab_size
a = max_position_embeddings
a = hidden_size
a = intermediate_size
a = num_hidden_layers
a = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
a = num_attention_heads
a = num_key_value_heads
a = hidden_act
a = initializer_range
a = rms_norm_eps
a = pretraining_tp
a = use_cache
a = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , tie_word_embeddings=__UpperCAmelCase , **__UpperCAmelCase , )
def __lowerCAmelCase ( self : Tuple ) ->Tuple:
"""simple docstring"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __UpperCAmelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
                F"""got {self.rope_scaling}""" )
a = self.rope_scaling.get('''type''' , __UpperCAmelCase )
a = self.rope_scaling.get('''factor''' , __UpperCAmelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or rope_scaling_factor <= 1.0:
raise ValueError(F"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
| 0 | 0 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
_A : List[str] = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
_UpperCAmelCase : Any = ["audio_values", "audio_mask"]
def __init__( self : Any , A : Union[str, Any]=2_0_4_8 , A : Any=1 , A : int=[1_6, 1_6] , A : Any=1_2_8 , A : List[Any]=4_4_1_0_0 , A : Dict=8_6 , A : Dict=2_0_4_8 , A : str=0.0 , **A : Union[str, Any] , ) ->List[Any]:
super().__init__(
feature_size=A , sampling_rate=A , padding_value=A , **A , )
lowerCamelCase__ : Dict = spectrogram_length
lowerCamelCase__ : Optional[int] = num_channels
lowerCamelCase__ : str = patch_size
lowerCamelCase__ : Any = feature_size // self.patch_size[1]
lowerCamelCase__ : Union[str, Any] = n_fft
lowerCamelCase__ : Union[str, Any] = sampling_rate // hop_length_to_sampling_rate
lowerCamelCase__ : Optional[Any] = sampling_rate
lowerCamelCase__ : Optional[Any] = padding_value
lowerCamelCase__ : Union[str, Any] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=A , min_frequency=0.0 , max_frequency=2_2_0_5_0.0 , sampling_rate=A , norm='''slaney''' , mel_scale='''slaney''' , ).T
def __lowerCamelCase ( self : List[Any] , A : np.array ) ->np.ndarray:
lowerCamelCase__ : Any = spectrogram(
A , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='''dB''' , db_range=80.0 , )
        lowerCamelCase__ : Any = log_spec[:, :-1]  # drop the last frame
        lowerCamelCase__ : int = log_spec - 20.0  # shift the dB scale down
        lowerCamelCase__ : int = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0  # map to the range [-1, 1]
return log_spec
def __call__( self : Union[str, Any] , A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , A : Optional[Union[str, TensorType]] = None , A : Optional[bool] = True , A : Optional[int] = None , A : bool = False , A : bool = False , **A : str , ) ->BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
                    '''This feature extractor is set to support a sampling rate'''
                    F" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    F" at {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
lowerCamelCase__ : List[str] = isinstance(A , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"Only mono-channel audio is supported for input to {self}" )
lowerCamelCase__ : Any = is_batched_numpy or (
isinstance(A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCamelCase__ : Optional[int] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(A , np.ndarray ):
lowerCamelCase__ : Optional[int] = np.asarray(A , dtype=np.floataa )
elif isinstance(A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCamelCase__ : Optional[Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCamelCase__ : int = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
lowerCamelCase__ : List[str] = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , A ):
lowerCamelCase__ : Dict = [np.asarray(A , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
lowerCamelCase__ : Optional[Any] = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
lowerCamelCase__ : List[Any] = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
lowerCamelCase__ : List[str] = np.array(A ).astype(np.floataa )
# convert into correct format for padding
lowerCamelCase__ : str = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
lowerCamelCase__ : Optional[Any] = np.ones([len(A ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
lowerCamelCase__ : Tuple = padded_audio_features * self.padding_value
for i in range(len(A ) ):
lowerCamelCase__ : int = audio_features[i]
lowerCamelCase__ : Optional[int] = feature
# return as BatchFeature
if return_attention_mask:
lowerCamelCase__ : Optional[int] = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask}
else:
lowerCamelCase__ : Tuple = {'''audio_values''': padded_audio_features}
lowerCamelCase__ : Union[str, Any] = BatchFeature(data=A , tensor_type=A )
return encoded_inputs
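# Minimal usage sketch, assuming the class above mirrors transformers' TvltFeatureExtractor
# (the one-second silent waveform is a placeholder input):
#
#     import numpy as np
#     feature_extractor = TvltFeatureExtractor()
#     waveform = np.zeros(44_100, dtype=np.float32)
#     features = feature_extractor(waveform, sampling_rate=44_100, return_attention_mask=True)
#     print(features["audio_values"].shape, features["audio_mask"].shape)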
| 360 |
import os
from pathlib import Path
def _a ( ) -> Tuple:
"""simple docstring"""
from torch.utils.cpp_extension import load
    root = Path(__file__).resolve().parent.parent.parent / '''kernels''' / '''deformable_detr'''
    src_files = [
        root / filename
        for filename in [
            '''vision.cpp''',
            os.path.join('''cpu''' , '''ms_deform_attn_cpu.cpp''' ),
            os.path.join('''cuda''' , '''ms_deform_attn_cuda.cu''' ),
        ]
    ]

    load(
        '''MultiScaleDeformableAttention''' , src_files , with_cuda=True , extra_include_paths=[str(root )] , extra_cflags=['''-DWITH_CUDA=1'''] , extra_cuda_cflags=[
'''-DCUDA_HAS_FP16=1''',
'''-D__CUDA_NO_HALF_OPERATORS__''',
'''-D__CUDA_NO_HALF_CONVERSIONS__''',
'''-D__CUDA_NO_HALF2_OPERATORS__''',
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
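# Minimal usage sketch (requires a local CUDA toolchain; PyTorch caches the compiled extension):
#
#     MSDA = _a()  # compiles the deformable-DETR kernels on first call
#     # MSDA then exposes the custom multi-scale deformable attention ops.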
| 265 | 0 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_snake_case = 16
_snake_case = 32
def _A ( snake_case , snake_case = 16 ) -> str:
_lowercase : int = AutoTokenizer.from_pretrained("bert-base-cased" )
_lowercase : Any = load_dataset("glue" , "mrpc" )
def tokenize_function(snake_case ):
# max_length=None => use the model max length (it's actually the default)
_lowercase : List[Any] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_lowercase : Optional[Any] = datasets.map(
__UpperCAmelCase , batched=__UpperCAmelCase , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_lowercase : int = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(snake_case ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_lowercase : Optional[Any] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_lowercase : str = 16
elif accelerator.mixed_precision != "no":
_lowercase : Any = 8
else:
_lowercase : List[Any] = None
return tokenizer.pad(
__UpperCAmelCase , padding="longest" , max_length=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_tensors="pt" , )
# Instantiate dataloaders.
_lowercase : Optional[Any] = DataLoader(
tokenized_datasets["train"] , shuffle=__UpperCAmelCase , collate_fn=__UpperCAmelCase , batch_size=__UpperCAmelCase )
_lowercase : Dict = DataLoader(
tokenized_datasets["validation"] , shuffle=__UpperCAmelCase , collate_fn=__UpperCAmelCase , batch_size=__UpperCAmelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_snake_case = mocked_dataloaders # noqa: F811
def _A ( snake_case , snake_case ) -> List[str]:
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS" , __UpperCAmelCase ) == "1":
_lowercase : Union[str, Any] = 2
# Initialize accelerator
_lowercase : List[str] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_lowercase : Tuple = config['''lr''']
_lowercase : Tuple = int(config["num_epochs"] )
_lowercase : str = int(config["seed"] )
_lowercase : Dict = int(config["batch_size"] )
_lowercase : str = evaluate.load("glue" , "mrpc" )
# If the batch size is too big we use gradient accumulation
_lowercase : Optional[Any] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
_lowercase : int = batch_size // MAX_GPU_BATCH_SIZE
_lowercase : List[str] = MAX_GPU_BATCH_SIZE
set_seed(__UpperCAmelCase )
_lowercase : Optional[Any] = get_dataloaders(__UpperCAmelCase , __UpperCAmelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_lowercase : str = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=__UpperCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_lowercase : List[Any] = model.to(accelerator.device )
# Instantiate optimizer
_lowercase : int = AdamW(params=model.parameters() , lr=__UpperCAmelCase )
# Instantiate scheduler
_lowercase : Any = get_linear_schedule_with_warmup(
optimizer=__UpperCAmelCase , num_warmup_steps=1_00 , num_training_steps=(len(__UpperCAmelCase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_lowercase : List[str] = accelerator.prepare(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# Now we train the model
for epoch in range(__UpperCAmelCase ):
model.train()
for step, batch in enumerate(__UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_lowercase : Union[str, Any] = model(**__UpperCAmelCase )
_lowercase : Any = outputs.loss
_lowercase : Optional[Any] = loss / gradient_accumulation_steps
accelerator.backward(__UpperCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
_lowercase : List[str] = 0
for step, batch in enumerate(__UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_lowercase : str = model(**__UpperCAmelCase )
_lowercase : List[Any] = outputs.logits.argmax(dim=-1 )
_lowercase : Optional[Any] = accelerator.gather((predictions, batch["labels"]) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(__UpperCAmelCase ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
_lowercase : Dict = predictions[: len(eval_dataloader.dataset ) - samples_seen]
_lowercase : Tuple = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=__UpperCAmelCase , references=__UpperCAmelCase , )
_lowercase : Optional[int] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , __UpperCAmelCase )
def _A ( ) -> List[str]:
_lowercase : Tuple = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=__UpperCAmelCase , default=__UpperCAmelCase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
_lowercase : Optional[Any] = parser.parse_args()
_lowercase : List[Any] = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(__UpperCAmelCase , __UpperCAmelCase )
if __name__ == "__main__":
main()
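# Minimal launch sketch (the script path is a placeholder; `accelerate launch` handles the
# distributed setup, while a plain `python` run stays single-process):
#
#     accelerate launch multi_process_metrics.py --mixed_precision fp16
#     python multi_process_metrics.py --cpu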
| 250 |
"""simple docstring"""
def fibonacci(n ) -> int:
    if n == 1 or not isinstance(n , int ):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2 , n + 1 ):
            sequence.append(sequence[i - 1] + sequence[i - 2] )

        return sequence[n]


def fibonacci_digits_index(n ) -> int:
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index ) ) )

    return index


def solution(n = 1_0_0_0 ) -> int:
    return fibonacci_digits_index(n )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
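# Worked example: fibonacci(12) = 144 is the first Fibonacci number with 3 digits, so
# solution(3) == 12; solution(1000) answers Project Euler problem 25.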
| 177 | 0 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
def _lowercase ( UpperCamelCase_ ) -> List[str]:
'''simple docstring'''
print('Loading config file...' )
def flatten_yaml_as_dict(UpperCamelCase_ , UpperCamelCase_="" , UpperCamelCase_="." ):
SCREAMING_SNAKE_CASE__ = []
for k, v in d.items():
SCREAMING_SNAKE_CASE__ = parent_key + sep + k if parent_key else k
if isinstance(UpperCamelCase_ , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(UpperCamelCase_ , UpperCamelCase_ , sep=UpperCamelCase_ ).items() )
else:
items.append((new_key, v) )
return dict(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = argparse.Namespace()
with open(UpperCamelCase_ , 'r' ) as yaml_file:
try:
SCREAMING_SNAKE_CASE__ = yaml.load(UpperCamelCase_ , Loader=yaml.FullLoader )
SCREAMING_SNAKE_CASE__ = flatten_yaml_as_dict(UpperCamelCase_ )
for k, v in flat_cfg.items():
setattr(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
except yaml.YAMLError as exc:
logger.error('Error while loading config file: {}. Error message: {}'.format(UpperCamelCase_ , str(UpperCamelCase_ ) ) )
return config
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = MobileViTVaConfig()
SCREAMING_SNAKE_CASE__ = False
# dataset
if task_name.startswith('imagenet1k_' ):
SCREAMING_SNAKE_CASE__ = 1000
if int(task_name.strip().split('_' )[-1] ) == 384:
SCREAMING_SNAKE_CASE__ = 384
else:
SCREAMING_SNAKE_CASE__ = 256
SCREAMING_SNAKE_CASE__ = 'imagenet-1k-id2label.json'
elif task_name.startswith('imagenet21k_to_1k_' ):
SCREAMING_SNAKE_CASE__ = 21000
if int(task_name.strip().split('_' )[-1] ) == 384:
SCREAMING_SNAKE_CASE__ = 384
else:
SCREAMING_SNAKE_CASE__ = 256
SCREAMING_SNAKE_CASE__ = 'imagenet-22k-id2label.json'
elif task_name.startswith('ade20k_' ):
SCREAMING_SNAKE_CASE__ = 151
SCREAMING_SNAKE_CASE__ = 512
SCREAMING_SNAKE_CASE__ = 'ade20k-id2label.json'
SCREAMING_SNAKE_CASE__ = True
elif task_name.startswith('voc_' ):
SCREAMING_SNAKE_CASE__ = 21
SCREAMING_SNAKE_CASE__ = 512
SCREAMING_SNAKE_CASE__ = 'pascal-voc-id2label.json'
SCREAMING_SNAKE_CASE__ = True
# orig_config
SCREAMING_SNAKE_CASE__ = load_orig_config_file(UpperCamelCase_ )
assert getattr(UpperCamelCase_ , 'model.classification.name' , -1 ) == "mobilevit_v2", "Invalid model"
SCREAMING_SNAKE_CASE__ = getattr(UpperCamelCase_ , 'model.classification.mitv2.width_multiplier' , 1.0 )
assert (
getattr(UpperCamelCase_ , 'model.classification.mitv2.attn_norm_layer' , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
SCREAMING_SNAKE_CASE__ = getattr(UpperCamelCase_ , 'model.classification.activation.name' , 'swish' )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
SCREAMING_SNAKE_CASE__ = getattr(UpperCamelCase_ , 'model.segmentation.output_stride' , 16 )
if "_deeplabv3" in task_name:
SCREAMING_SNAKE_CASE__ = getattr(UpperCamelCase_ , 'model.segmentation.deeplabv3.aspp_rates' , [12, 24, 36] )
SCREAMING_SNAKE_CASE__ = getattr(UpperCamelCase_ , 'model.segmentation.deeplabv3.aspp_out_channels' , 512 )
SCREAMING_SNAKE_CASE__ = getattr(UpperCamelCase_ , 'model.segmentation.deeplabv3.aspp_dropout' , 0.1 )
# id2label
SCREAMING_SNAKE_CASE__ = 'huggingface/label-files'
SCREAMING_SNAKE_CASE__ = json.load(open(hf_hub_download(UpperCamelCase_ , UpperCamelCase_ , repo_type='dataset' ) , 'r' ) )
SCREAMING_SNAKE_CASE__ = {int(UpperCamelCase_ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ = idalabel
SCREAMING_SNAKE_CASE__ = {v: k for k, v in idalabel.items()}
return config
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = dct.pop(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = val
def _lowercase ( UpperCamelCase_ , UpperCamelCase_=False ) -> Tuple:
'''simple docstring'''
if base_model:
SCREAMING_SNAKE_CASE__ = ''
else:
SCREAMING_SNAKE_CASE__ = 'mobilevitv2.'
SCREAMING_SNAKE_CASE__ = []
for k in state_dict.keys():
if k[:8] == "encoder.":
SCREAMING_SNAKE_CASE__ = k[8:]
else:
SCREAMING_SNAKE_CASE__ = k
if ".block." in k:
SCREAMING_SNAKE_CASE__ = k_new.replace('.block.' , '.' )
if ".conv." in k:
SCREAMING_SNAKE_CASE__ = k_new.replace('.conv.' , '.convolution.' )
if ".norm." in k:
SCREAMING_SNAKE_CASE__ = k_new.replace('.norm.' , '.normalization.' )
if "conv_1." in k:
SCREAMING_SNAKE_CASE__ = k_new.replace('conv_1.' , F'{model_prefix}conv_stem.' )
for i in [1, 2]:
if F'layer_{i}.' in k:
SCREAMING_SNAKE_CASE__ = k_new.replace(F'layer_{i}.' , F'{model_prefix}encoder.layer.{i-1}.layer.' )
if ".exp_1x1." in k:
SCREAMING_SNAKE_CASE__ = k_new.replace('.exp_1x1.' , '.expand_1x1.' )
if ".red_1x1." in k:
SCREAMING_SNAKE_CASE__ = k_new.replace('.red_1x1.' , '.reduce_1x1.' )
for i in [3, 4, 5]:
if F'layer_{i}.0.' in k:
SCREAMING_SNAKE_CASE__ = k_new.replace(F'layer_{i}.0.' , F'{model_prefix}encoder.layer.{i-1}.downsampling_layer.' )
if F'layer_{i}.1.local_rep.0.' in k:
SCREAMING_SNAKE_CASE__ = k_new.replace(F'layer_{i}.1.local_rep.0.' , F'{model_prefix}encoder.layer.{i-1}.conv_kxk.' )
if F'layer_{i}.1.local_rep.1.' in k:
SCREAMING_SNAKE_CASE__ = k_new.replace(F'layer_{i}.1.local_rep.1.' , F'{model_prefix}encoder.layer.{i-1}.conv_1x1.' )
for i in [3, 4, 5]:
if i == 3:
SCREAMING_SNAKE_CASE__ = [0, 1]
elif i == 4:
SCREAMING_SNAKE_CASE__ = [0, 1, 2, 3]
elif i == 5:
SCREAMING_SNAKE_CASE__ = [0, 1, 2]
for j in j_in:
if F'layer_{i}.1.global_rep.{j}.' in k:
SCREAMING_SNAKE_CASE__ = k_new.replace(
F'layer_{i}.1.global_rep.{j}.' , F'{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.' )
if F'layer_{i}.1.global_rep.{j+1}.' in k:
SCREAMING_SNAKE_CASE__ = k_new.replace(
F'layer_{i}.1.global_rep.{j+1}.' , F'{model_prefix}encoder.layer.{i-1}.layernorm.' )
if F'layer_{i}.1.conv_proj.' in k:
SCREAMING_SNAKE_CASE__ = k_new.replace(F'layer_{i}.1.conv_proj.' , F'{model_prefix}encoder.layer.{i-1}.conv_projection.' )
if "pre_norm_attn.0." in k:
SCREAMING_SNAKE_CASE__ = k_new.replace('pre_norm_attn.0.' , 'layernorm_before.' )
if "pre_norm_attn.1." in k:
SCREAMING_SNAKE_CASE__ = k_new.replace('pre_norm_attn.1.' , 'attention.' )
if "pre_norm_ffn.0." in k:
SCREAMING_SNAKE_CASE__ = k_new.replace('pre_norm_ffn.0.' , 'layernorm_after.' )
if "pre_norm_ffn.1." in k:
SCREAMING_SNAKE_CASE__ = k_new.replace('pre_norm_ffn.1.' , 'ffn.conv1.' )
if "pre_norm_ffn.3." in k:
SCREAMING_SNAKE_CASE__ = k_new.replace('pre_norm_ffn.3.' , 'ffn.conv2.' )
if "classifier.1." in k:
SCREAMING_SNAKE_CASE__ = k_new.replace('classifier.1.' , 'classifier.' )
if "seg_head." in k:
SCREAMING_SNAKE_CASE__ = k_new.replace('seg_head.' , 'segmentation_head.' )
if ".aspp_layer." in k:
SCREAMING_SNAKE_CASE__ = k_new.replace('.aspp_layer.' , '.' )
if ".aspp_pool." in k:
SCREAMING_SNAKE_CASE__ = k_new.replace('.aspp_pool.' , '.' )
rename_keys.append((k, k_new) )
return rename_keys
def _lowercase ( UpperCamelCase_ ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = []
for k in state_dict.keys():
if k.startswith('seg_head.aux_head.' ):
keys_to_ignore.append(UpperCamelCase_ )
for k in keys_to_ignore:
state_dict.pop(UpperCamelCase_ , UpperCamelCase_ )
def _lowercase ( ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
SCREAMING_SNAKE_CASE__ = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw )
return im
@torch.no_grad()
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = get_mobilevitva_config(UpperCamelCase_ , UpperCamelCase_ )
# load original state_dict
SCREAMING_SNAKE_CASE__ = torch.load(UpperCamelCase_ , map_location='cpu' )
# load huggingface model
if task_name.startswith('ade20k_' ) or task_name.startswith('voc_' ):
SCREAMING_SNAKE_CASE__ = MobileViTVaForSemanticSegmentation(UpperCamelCase_ ).eval()
SCREAMING_SNAKE_CASE__ = False
else:
SCREAMING_SNAKE_CASE__ = MobileViTVaForImageClassification(UpperCamelCase_ ).eval()
SCREAMING_SNAKE_CASE__ = False
    # remove and rename some keys of the original state dict
SCREAMING_SNAKE_CASE__ = checkpoint
remove_unused_keys(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = create_rename_keys(UpperCamelCase_ , base_model=UpperCamelCase_ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# load modified state_dict
model.load_state_dict(UpperCamelCase_ )
# Check outputs on an image, prepared by MobileViTImageProcessor
SCREAMING_SNAKE_CASE__ = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
SCREAMING_SNAKE_CASE__ = image_processor(images=prepare_img() , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ = model(**UpperCamelCase_ )
# verify classification model
if task_name.startswith('imagenet' ):
SCREAMING_SNAKE_CASE__ = outputs.logits
SCREAMING_SNAKE_CASE__ = logits.argmax(-1 ).item()
print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
if task_name.startswith('imagenet1k_256' ) and config.width_multiplier == 1.0:
# expected_logits for base variant
SCREAMING_SNAKE_CASE__ = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] )
assert torch.allclose(logits[0, :3] , UpperCamelCase_ , atol=1e-4 )
Path(UpperCamelCase_ ).mkdir(exist_ok=UpperCamelCase_ )
print(F'Saving model {task_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(UpperCamelCase_ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(UpperCamelCase_ )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""",
default="""imagenet1k_256""",
type=str,
help=(
"""Name of the task for which the MobileViTV2 model you'd like to convert is trained on . """
"""
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
"""
),
choices=[
"""imagenet1k_256""",
"""imagenet1k_384""",
"""imagenet21k_to_1k_256""",
"""imagenet21k_to_1k_384""",
"""ade20k_deeplabv3""",
"""voc_deeplabv3""",
],
)
parser.add_argument(
"""--orig_checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument("""--orig_config_path""", required=True, type=str, help="""Path to the original config file.""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
__snake_case = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
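# Minimal invocation sketch (the script name and all paths are placeholders):
#
#     python convert_mobilevitv2_to_pytorch.py \
#         --task imagenet1k_256 \
#         --orig_checkpoint_path mobilevitv2-1.0.pt \
#         --orig_config_path mobilevitv2-1.0.yaml \
#         --pytorch_dump_folder_path ./mobilevitv2-1.0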
| 169 |
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
__snake_case = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
__snake_case = get_tests_dir("""fixtures/vocab.json""")
__snake_case = get_tests_dir("""fixtures""")
class lowercase__ ( unittest.TestCase ):
A__ : List[Any] =["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
def A_ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ = 0
def A_ ( self : str ):
SCREAMING_SNAKE_CASE__ = AutoProcessor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
def A_ ( self : List[Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ = WavaVecaConfig()
SCREAMING_SNAKE_CASE__ = AutoProcessor.from_pretrained('facebook/wav2vec2-base-960h' )
# save in new folder
model_config.save_pretrained(UpperCAmelCase_ )
processor.save_pretrained(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = AutoProcessor.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
def A_ ( self : Union[str, Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) )
copyfile(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , 'vocab.json' ) )
SCREAMING_SNAKE_CASE__ = AutoProcessor.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
def A_ ( self : Dict ):
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('facebook/wav2vec2-base-960h' )
SCREAMING_SNAKE_CASE__ = WavaVecaProcessor(UpperCAmelCase_ , UpperCAmelCase_ )
# save in new folder
processor.save_pretrained(UpperCAmelCase_ )
# drop `processor_class` in tokenizer
with open(os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) , 'r' ) as f:
SCREAMING_SNAKE_CASE__ = json.load(UpperCAmelCase_ )
config_dict.pop('processor_class' )
with open(os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) , 'w' ) as f:
f.write(json.dumps(UpperCAmelCase_ ) )
SCREAMING_SNAKE_CASE__ = AutoProcessor.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
def A_ ( self : List[str] ):
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('facebook/wav2vec2-base-960h' )
SCREAMING_SNAKE_CASE__ = WavaVecaProcessor(UpperCAmelCase_ , UpperCAmelCase_ )
# save in new folder
processor.save_pretrained(UpperCAmelCase_ )
# drop `processor_class` in feature extractor
with open(os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) , 'r' ) as f:
SCREAMING_SNAKE_CASE__ = json.load(UpperCAmelCase_ )
config_dict.pop('processor_class' )
with open(os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) , 'w' ) as f:
f.write(json.dumps(UpperCAmelCase_ ) )
SCREAMING_SNAKE_CASE__ = AutoProcessor.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
def A_ ( self : Union[str, Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ = WavaVecaConfig(processor_class='Wav2Vec2Processor' )
model_config.save_pretrained(UpperCAmelCase_ )
# copy relevant files
copyfile(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , 'vocab.json' ) )
            # create empty sample processor
with open(os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) , 'w' ) as f:
f.write('{}' )
SCREAMING_SNAKE_CASE__ = AutoProcessor.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
def A_ ( self : Optional[Any] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' , trust_remote_code=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' , trust_remote_code=UpperCAmelCase_ )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , 'NewProcessor' )
SCREAMING_SNAKE_CASE__ = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
SCREAMING_SNAKE_CASE__ = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
# Test we can also load the slow version
SCREAMING_SNAKE_CASE__ = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' , trust_remote_code=UpperCAmelCase_ , use_fast=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , 'NewTokenizer' )
else:
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
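    # Hedged sketch of the remote-code behavior exercised above (requires Hub
    # access; the repo id is the test fixture used throughout this file):
    #   processor = AutoProcessor.from_pretrained(
    #       "hf-internal-testing/test_dynamic_processor", trust_remote_code=True
    #   )
    # With trust_remote_code=True the class is loaded from the repo's
    # custom_processing.py; with it disabled (or unset), loading raises instead.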
def A_ ( self : Union[str, Any] ):
try:
AutoConfig.register('custom' , UpperCAmelCase_ )
AutoFeatureExtractor.register(UpperCAmelCase_ , UpperCAmelCase_ )
AutoTokenizer.register(UpperCAmelCase_ , slow_tokenizer_class=UpperCAmelCase_ )
AutoProcessor.register(UpperCAmelCase_ , UpperCAmelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCAmelCase_ ):
AutoProcessor.register(UpperCAmelCase_ , UpperCAmelCase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE__ = CustomFeatureExtractor.from_pretrained(UpperCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ = os.path.join(UpperCAmelCase_ , 'vocab.txt' )
with open(UpperCAmelCase_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ = CustomTokenizer(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = CustomProcessor(UpperCAmelCase_ , UpperCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = AutoProcessor.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
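    # Hedged sketch of the registration flow above, with illustrative names
    # (these are the public transformers auto-API register signatures):
    #   AutoConfig.register("custom", CustomConfig)
    #   AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
    #   AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
    #   AutoProcessor.register(CustomConfig, CustomProcessor)
    # Once registered, the auto classes resolve the custom implementations for
    # any checkpoint whose config deserializes to CustomConfig.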
def A_ ( self : Tuple ):
class lowercase__ ( _UpperCAmelCase ):
A__ : Optional[int] =False
class lowercase__ ( _UpperCAmelCase ):
A__ : Optional[int] =False
class lowercase__ ( _UpperCAmelCase ):
A__ : Dict ="""AutoFeatureExtractor"""
A__ : Optional[int] ="""AutoTokenizer"""
A__ : str =False
try:
AutoConfig.register('custom' , UpperCAmelCase_ )
AutoFeatureExtractor.register(UpperCAmelCase_ , UpperCAmelCase_ )
AutoTokenizer.register(UpperCAmelCase_ , slow_tokenizer_class=UpperCAmelCase_ )
AutoProcessor.register(UpperCAmelCase_ , UpperCAmelCase_ )
# If remote code is not set, the default is to use local classes.
SCREAMING_SNAKE_CASE__ = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' )
self.assertEqual(processor.__class__.__name__ , 'NewProcessor' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
SCREAMING_SNAKE_CASE__ = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' , trust_remote_code=UpperCAmelCase_ )
self.assertEqual(processor.__class__.__name__ , 'NewProcessor' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
            # If remote code is enabled, we load from the Hub.
SCREAMING_SNAKE_CASE__ = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' , trust_remote_code=UpperCAmelCase_ )
self.assertEqual(processor.__class__.__name__ , 'NewProcessor' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def A_ ( self : List[str] ):
SCREAMING_SNAKE_CASE__ = AutoProcessor.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(processor.__class__.__name__ , 'BertTokenizerFast' )
def A_ ( self : str ):
SCREAMING_SNAKE_CASE__ = AutoProcessor.from_pretrained('hf-internal-testing/tiny-random-convnext' )
self.assertEqual(processor.__class__.__name__ , 'ConvNextImageProcessor' )
@is_staging_test
class lowercase__ ( unittest.TestCase ):
A__ : List[Any] =["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
@classmethod
def A_ ( cls : str ):
SCREAMING_SNAKE_CASE__ = TOKEN
HfFolder.save_token(UpperCAmelCase_ )
@classmethod
def A_ ( cls : List[Any] ):
try:
delete_repo(token=cls._token , repo_id='test-processor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-processor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-processor' )
except HTTPError:
pass
def A_ ( self : List[str] ):
SCREAMING_SNAKE_CASE__ = WavaVecaProcessor.from_pretrained(UpperCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(UpperCAmelCase_ , 'test-processor' ) , push_to_hub=UpperCAmelCase_ , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ = WavaVecaProcessor.from_pretrained(F'{USER}/test-processor' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(UpperCAmelCase_ , getattr(new_processor.feature_extractor , UpperCAmelCase_ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def A_ ( self : Dict ):
SCREAMING_SNAKE_CASE__ = WavaVecaProcessor.from_pretrained(UpperCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(UpperCAmelCase_ , 'test-processor-org' ) , push_to_hub=UpperCAmelCase_ , use_auth_token=self._token , organization='valid_org' , )
SCREAMING_SNAKE_CASE__ = WavaVecaProcessor.from_pretrained('valid_org/test-processor-org' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(UpperCAmelCase_ , getattr(new_processor.feature_extractor , UpperCAmelCase_ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
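    # Hedged sketch of the push flow above: saving with push_to_hub=True both
    # writes the processor locally and uploads it to the Hub, so a later
    #   WavaVecaProcessor.from_pretrained("<namespace>/test-processor")
    # round-trips the same feature-extractor attributes and tokenizer vocab
    # ("<namespace>" is a placeholder for the user or organization name).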
def A_ ( self : Optional[Any] ):
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
SCREAMING_SNAKE_CASE__ = CustomFeatureExtractor.from_pretrained(UpperCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ = os.path.join(UpperCAmelCase_ , 'vocab.txt' )
with open(UpperCAmelCase_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ = CustomTokenizer(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = CustomProcessor(UpperCAmelCase_ , UpperCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F'{USER}/test-dynamic-processor' , token=self._token )
SCREAMING_SNAKE_CASE__ = Repository(UpperCAmelCase_ , clone_from=F'{USER}/test-dynamic-processor' , token=self._token )
processor.save_pretrained(UpperCAmelCase_ )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor',
'AutoProcessor': 'custom_processing.CustomProcessor',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(UpperCAmelCase_ , 'tokenizer_config.json' ) ) as f:
SCREAMING_SNAKE_CASE__ = json.load(UpperCAmelCase_ )
self.assertDictEqual(
tokenizer_config['auto_map'] , {
'AutoTokenizer': ['custom_tokenization.CustomTokenizer', None],
'AutoProcessor': 'custom_processing.CustomProcessor',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(UpperCAmelCase_ , 'custom_feature_extraction.py' ) ) )
self.assertTrue(os.path.isfile(os.path.join(UpperCAmelCase_ , 'custom_tokenization.py' ) ) )
self.assertTrue(os.path.isfile(os.path.join(UpperCAmelCase_ , 'custom_processing.py' ) ) )
repo.push_to_hub()
SCREAMING_SNAKE_CASE__ = AutoProcessor.from_pretrained(F'{USER}/test-dynamic-processor' , trust_remote_code=UpperCAmelCase_ )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , 'CustomProcessor' )
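# Minimal, hedged usage sketch of the resolution behavior covered by these
# tests. The checkpoint id is a tiny public fixture; network access is assumed.
if __name__ == "__main__":
    example_processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert")
    # For a tokenizer-only checkpoint, AutoProcessor falls back to the tokenizer class.
    print(type(example_processor).__name__)  # expected: "BertTokenizerFast"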
| 169 | 1 |