"""simple docstring"""
import pprint
import requests
lowercase = '''https://zenquotes.io/api'''
def UpperCAmelCase ( ):
'''simple docstring'''
return requests.get(API_ENDPOINT_URL + '/today' ).json()
def UpperCAmelCase ( ):
'''simple docstring'''
return requests.get(API_ENDPOINT_URL + '/random' ).json()
if __name__ == "__main__":
lowercase = random_quotes()
pprint.pprint(response)
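# Usage sketch: ZenQuotes responses are JSON lists of objects. The keys below
# ("q" for quote text, "a" for author) are an assumption about the external
# API, not guaranteed by this file -- check a live response before relying on them.
# for quote in random_quotes():
#     print(f"{quote['q']} -- {quote['a']}")  # assumed keys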
"""simple docstring"""
def UpperCAmelCase ( A : int ):
'''simple docstring'''
_UpperCAmelCase = abs(A )
_UpperCAmelCase = 0
while n > 0:
res += n % 10
n //= 10
return res
def UpperCAmelCase ( A : int ):
'''simple docstring'''
_UpperCAmelCase = abs(A )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def UpperCAmelCase ( A : int ):
'''simple docstring'''
return sum(int(A ) for c in str(abs(A ) ) )
def UpperCAmelCase ( ):
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(A : Callable , A : int ) -> None:
_UpperCAmelCase = f'{func.__name__}({value})'
_UpperCAmelCase = timeit(f'__main__.{call}' , setup='import __main__' )
print(f'{call:56} = {func(A )} -- {timing:.4f} seconds' )
for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(A , A )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class lowercase__ :
'''simple docstring'''
_UpperCAmelCase = 42
_UpperCAmelCase = None
_UpperCAmelCase = None
def UpperCAmelCase ( ):
'''simple docstring'''
_UpperCAmelCase = Node(1 )
_UpperCAmelCase = Node(2 )
_UpperCAmelCase = Node(3 )
_UpperCAmelCase = Node(4 )
_UpperCAmelCase = Node(5 )
return tree
def UpperCAmelCase ( A : Node | None ):
'''simple docstring'''
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def UpperCAmelCase ( A : Node | None ):
'''simple docstring'''
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def UpperCAmelCase ( A : Node | None ):
'''simple docstring'''
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def UpperCAmelCase ( A : Node | None ):
'''simple docstring'''
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def UpperCAmelCase ( A : Node | None ):
'''simple docstring'''
_UpperCAmelCase = []
if root is None:
return output
_UpperCAmelCase = deque([root] )
while process_queue:
_UpperCAmelCase = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def UpperCAmelCase ( A : Node | None , A : int ):
'''simple docstring'''
_UpperCAmelCase = []
def populate_output(A : Node | None , A : int ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(A , A )
return output
def UpperCAmelCase ( A : Node | None , A : int ):
'''simple docstring'''
_UpperCAmelCase = []
def populate_output(A : Node | None , A : int ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(A , A )
return output
def UpperCAmelCase ( A : Node | None ):
'''simple docstring'''
if root is None:
return []
_UpperCAmelCase = []
_UpperCAmelCase = 0
_UpperCAmelCase = height(A )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(A , A ) )
_UpperCAmelCase = 1
else:
output.append(get_nodes_from_right_to_left(A , A ) )
_UpperCAmelCase = 0
return output
def UpperCAmelCase ( ): # Main function for testing.
'''simple docstring'''
_UpperCAmelCase = make_tree()
print(f'In-order Traversal: {inorder(A )}' )
print(f'Pre-order Traversal: {preorder(A )}' )
print(f'Post-order Traversal: {postorder(A )}' , '\n' )
print(f'Height of Tree: {height(A )}' , '\n' )
print('Complete Level Order Traversal: ' )
print(level_order(A ) , '\n' )
print('Level-wise order Traversal: ' )
for level in range(1 , height(A ) + 1 ):
print(f'Level {level}:' , get_nodes_from_left_to_right(A , level=A ) )
print('\nZigZag order Traversal: ' )
print(zigzag(A ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
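# Expected output for the sample tree built by make_tree() (verified by hand):
#   inorder     -> [4, 2, 5, 1, 3]
#   preorder    -> [1, 2, 4, 5, 3]
#   postorder   -> [4, 5, 2, 3, 1]
#   level_order -> [1, 2, 3, 4, 5]
#   zigzag      -> [[1], [3, 2], [4, 5]]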
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase ( A : int , A : int ):
'''simple docstring'''
_UpperCAmelCase = []
create_all_state(1 , A , A , [] , A )
return result
def UpperCAmelCase ( A : int , A : int , A : int , A : list[int] , A : list[list[int]] , ):
'''simple docstring'''
if level == 0:
total_list.append(current_list[:] )
return
for i in range(A , total_number - level + 2 ):
current_list.append(A )
create_all_state(i + 1 , A , level - 1 , A , A )
current_list.pop()
def UpperCAmelCase ( A : list[list[int]] ):
'''simple docstring'''
for i in total_list:
print(*A )
if __name__ == "__main__":
lowercase = 4
lowercase = 2
lowercase = generate_all_combinations(n, k)
print_all_state(total_list)
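# For n=4, k=2 the script prints the six 2-element combinations of 1..4,
# one per line: "1 2", "1 3", "1 4", "2 3", "2 4", "3 4".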
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def UpperCAmelCase ( A : str , A : Optional[int] ):
'''simple docstring'''
_UpperCAmelCase = tmp_path_factory.mktemp('dset_infos_dir' )
if "full:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('---\ndataset_info:\n dataset_size: 42\n---' )
if "empty:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f:
f.write('{"default": {"dataset_size": 42}}' )
_UpperCAmelCase = DatasetInfosDict.from_directory(A )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def UpperCAmelCase ( A : str , A : DatasetInfo ):
'''simple docstring'''
_UpperCAmelCase = str(A )
dataset_info.write_to_directory(A )
_UpperCAmelCase = DatasetInfo.from_directory(A )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(A , 'dataset_info.json' ) )
def UpperCAmelCase ( ):
'''simple docstring'''
_UpperCAmelCase = DatasetInfo(
description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
_UpperCAmelCase = dataset_info._to_yaml_dict()
assert sorted(A ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
_UpperCAmelCase = yaml.safe_dump(A )
_UpperCAmelCase = yaml.safe_load(A )
assert dataset_info_yaml_dict == reloaded
def UpperCAmelCase ( ):
'''simple docstring'''
_UpperCAmelCase = DatasetInfo()
_UpperCAmelCase = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=1337 ),
} ),
] , )
def UpperCAmelCase ( A : Tuple , A : DatasetInfosDict ):
'''simple docstring'''
_UpperCAmelCase = str(A )
dataset_infos_dict.write_to_directory(A )
_UpperCAmelCase = DatasetInfosDict.from_directory(A )
# the config_name of the dataset_infos_dict take over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
_UpperCAmelCase = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
_UpperCAmelCase = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(A , 'README.md' ) )
"""simple docstring"""
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
lowercase = logging.getLogger()
def UpperCAmelCase ( A : Path , A : list ):
'''simple docstring'''
_UpperCAmelCase = '\n'.join(A )
Path(A ).open('w' ).writelines(A )
lowercase = '''patrickvonplaten/t5-tiny-random'''
lowercase = '''sshleifer/bart-tiny-random'''
lowercase = '''sshleifer/tiny-mbart'''
lowercase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class lowercase__ ( A ):
'''simple docstring'''
def lowerCamelCase_ ( self , snake_case ) -> str:
_UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
_UpperCAmelCase = input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
_UpperCAmelCase = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.']
_dump_articles(snake_case , snake_case )
_UpperCAmelCase = str(Path(self.get_auto_remove_tmp_dir() ) / 'scores.json' )
_UpperCAmelCase = 'translation_en_to_de' if model == T5_TINY else 'summarization'
_UpperCAmelCase = f'\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n '.split()
with patch.object(snake_case , 'argv' , snake_case ):
run_generate()
assert Path(snake_case ).exists()
# os.remove(Path(output_file_name))
def lowerCamelCase_ ( self ) -> str:
self.run_eval_tester(snake_case )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def lowerCamelCase_ ( self , snake_case ) -> List[Any]:
self.run_eval_tester(snake_case )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def lowerCamelCase_ ( self , snake_case ) -> Dict:
_UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
_UpperCAmelCase = input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
_UpperCAmelCase = {
'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'],
'de': [
'Maschinelles Lernen ist groรartig, oder?',
'Ich esse gerne Bananen',
'Morgen ist wieder ein toller Tag!',
],
}
_UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() )
_UpperCAmelCase = str(tmp_dir / 'scores.json' )
_UpperCAmelCase = str(tmp_dir / 'val.target' )
_dump_articles(snake_case , text['en'] )
_dump_articles(snake_case , text['de'] )
_UpperCAmelCase = 'translation_en_to_de' if model == T5_TINY else 'summarization'
_UpperCAmelCase = f'\n run_eval_search.py\n {model}\n {str(snake_case )}\n {str(snake_case )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n '.split()
testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'] )
with patch.object(snake_case , 'argv' , snake_case ):
with CaptureStdout() as cs:
run_search()
_UpperCAmelCase = [' num_beams | length_penalty', model, 'Best score args']
_UpperCAmelCase = ['Info']
if "translation" in task:
expected_strings.append('bleu' )
else:
expected_strings.extend(snake_case )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(snake_case ).exists()
os.remove(Path(snake_case ) )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''SqueezeBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
lowercase = logging.get_logger(__name__)
lowercase = TypeVar('''DatasetType''', Dataset, IterableDataset)
def UpperCAmelCase ( A : List[DatasetType] , A : Optional[List[float]] = None , A : Optional[int] = None , A : Optional[DatasetInfo] = None , A : Optional[NamedSplit] = None , A : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ):
'''simple docstring'''
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('Unable to interleave an empty list of datasets.' )
for i, dataset in enumerate(A ):
if not isinstance(A , (Dataset, IterableDataset) ):
if isinstance(A , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
'is an empty dataset dictionary.' )
raise ValueError(
f'Dataset at position {i} has at least one split: {list(A )}\n'
f'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(A ) )}\']' )
raise ValueError(
f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.' )
if i == 0:
_UpperCAmelCase , _UpperCAmelCase = (
(Dataset, IterableDataset) if isinstance(A , A ) else (IterableDataset, Dataset)
)
elif not isinstance(A , A ):
raise ValueError(
f'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(f'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
A , A , A , info=A , split=A , stopping_strategy=A )
else:
return _interleave_iterable_datasets(
A , A , A , info=A , split=A , stopping_strategy=A )
def UpperCAmelCase ( A : List[DatasetType] , A : Optional[DatasetInfo] = None , A : Optional[NamedSplit] = None , A : int = 0 , ):
'''simple docstring'''
if not dsets:
raise ValueError('Unable to concatenate an empty list of datasets.' )
for i, dataset in enumerate(A ):
if not isinstance(A , (Dataset, IterableDataset) ):
if isinstance(A , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
'is an empty dataset dictionary.' )
raise ValueError(
f'Dataset at position {i} has at least one split: {list(A )}\n'
f'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(A ) )}\']' )
raise ValueError(
f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.' )
if i == 0:
_UpperCAmelCase , _UpperCAmelCase = (
(Dataset, IterableDataset) if isinstance(A , A ) else (IterableDataset, Dataset)
)
elif not isinstance(A , A ):
raise ValueError(
f'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(A , info=A , split=A , axis=A )
else:
return _concatenate_iterable_datasets(A , info=A , split=A , axis=A )
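# Usage sketch (relies on the public `datasets` API; the tiny datasets below
# are illustrative only):
# from datasets import Dataset
# d1 = Dataset.from_dict({"a": [0, 1, 2]})
# d2 = Dataset.from_dict({"a": [10, 11, 12]})
# mixed = interleave_datasets([d1, d2], probabilities=[0.5, 0.5], seed=42)
# combined = concatenate_datasets([d1, d2])  # rows of d1 followed by rows of d2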
"""simple docstring"""
import os
def UpperCAmelCase ( ):
'''simple docstring'''
_UpperCAmelCase = os.path.dirname(os.path.realpath(A ) )
_UpperCAmelCase = os.path.join(A , 'triangle.txt' )
with open(A ) as f:
_UpperCAmelCase = f.readlines()
_UpperCAmelCase = []
for line in triangle:
_UpperCAmelCase = []
for number in line.strip().split(' ' ):
numbers_from_line.append(int(A ) )
a.append(A )
for i in range(1 , len(A ) ):
for j in range(len(a[i] ) ):
_UpperCAmelCase = a[i - 1][j] if j != len(a[i - 1] ) else 0
_UpperCAmelCase = a[i - 1][j - 1] if j > 0 else 0
a[i][j] += max(A , A )
return max(a[-1] )
if __name__ == "__main__":
print(solution())
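# Worked example: for the small triangle
#      3
#     7 4
#    2 4 6
#   8 5 9 3
# the rows accumulate to [3], [10, 7], [12, 14, 13], [20, 19, 23, 16],
# so the maximum top-to-bottom path sum is 23 (3 + 7 + 4 + 9).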
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_UpperCAmelCase = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Dict:
_UpperCAmelCase = TextaTextGenerationPipeline(model=snake_case , tokenizer=snake_case )
return generator, ["Something to write", "Something else"]
def lowerCamelCase_ ( self , snake_case , snake_case ) -> Dict:
_UpperCAmelCase = generator('Something there' )
self.assertEqual(snake_case , [{'generated_text': ANY(snake_case )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]['generated_text'].startswith('Something there' ) )
_UpperCAmelCase = generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=snake_case )
self.assertEqual(
snake_case , [
[{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}],
[{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}],
] , )
_UpperCAmelCase = generator(
['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=snake_case )
self.assertEqual(
snake_case , [
[{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}],
[{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}],
] , )
with self.assertRaises(snake_case ):
generator(4 )
@require_torch
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='pt' )
# do_sample=False necessary for reproducibility
_UpperCAmelCase = generator('Something there' , do_sample=snake_case )
self.assertEqual(snake_case , [{'generated_text': ''}] )
_UpperCAmelCase = 3
_UpperCAmelCase = generator(
'Something there' , num_return_sequences=snake_case , num_beams=snake_case , )
_UpperCAmelCase = [
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': ''},
]
self.assertEqual(snake_case , snake_case )
_UpperCAmelCase = generator('This is a test' , do_sample=snake_case , num_return_sequences=2 , return_tensors=snake_case )
self.assertEqual(
snake_case , [
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
] , )
_UpperCAmelCase = generator.model.config.eos_token_id
_UpperCAmelCase = '<pad>'
_UpperCAmelCase = generator(
['This is a test', 'This is a second test'] , do_sample=snake_case , num_return_sequences=2 , batch_size=2 , return_tensors=snake_case , )
self.assertEqual(
snake_case , [
[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
],
[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
],
] , )
@require_tf
def lowerCamelCase_ ( self ) -> Any:
_UpperCAmelCase = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='tf' )
# do_sample=False necessary for reproducibility
_UpperCAmelCase = generator('Something there' , do_sample=snake_case )
self.assertEqual(snake_case , [{'generated_text': ''}] )
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def UpperCAmelCase ( A : Tuple ):
'''simple docstring'''
for param in module.parameters():
_UpperCAmelCase = False
def UpperCAmelCase ( ):
'''simple docstring'''
_UpperCAmelCase = 'cuda' if torch.cuda.is_available() else 'cpu'
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
_UpperCAmelCase = 'mps'
if device == "mps":
print(
'WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'
' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'
' with generations.' )
return device
def UpperCAmelCase ( A : Dict ):
'''simple docstring'''
_UpperCAmelCase = plt.imshow(A )
fig.axes.get_xaxis().set_visible(A )
fig.axes.get_yaxis().set_visible(A )
plt.show()
def UpperCAmelCase ( ):
'''simple docstring'''
_UpperCAmelCase = datetime.now()
_UpperCAmelCase = current_time.strftime('%H:%M:%S' )
return timestamp
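# Usage sketch (the torchvision model below is illustrative, not part of this file):
# import torchvision.models as models
# model = models.resnet18(weights=None).to(get_device())
# freeze_module(model)  # every parameter now has requires_grad == False
# print(get_timestamp())  # e.g. "14:03:27"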
"""simple docstring"""
def UpperCAmelCase ( A : int ):
'''simple docstring'''
_UpperCAmelCase = [[0 for _ in range(A )] for _ in range(m + 1 )]
for i in range(m + 1 ):
_UpperCAmelCase = 1
for n in range(m + 1 ):
for k in range(1 , A ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
lowercase = int(input('''Enter a number: ''').strip())
print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
lowercase = int(sys.argv[1])
print(partition(n))
except ValueError:
print('''Please pass a number.''')
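# Worked example (verified by tracing the table): partition(5) == 7, matching
# the seven integer partitions of 5:
#   5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1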
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowercase = logging.get_logger(__name__)
lowercase = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
lowercase = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def UpperCAmelCase ( A : Optional[int] , A : Dict , A : List[Any] , A : Union[str, Any] , A : List[Any] , A : List[str] ):
'''simple docstring'''
for attribute in key.split('.' ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
_UpperCAmelCase = 'lm_head'
_UpperCAmelCase = getattr(A , A )
if weight_type is not None:
_UpperCAmelCase = getattr(A , A ).shape
else:
_UpperCAmelCase = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
_UpperCAmelCase = value
elif weight_type == "weight_g":
_UpperCAmelCase = value
elif weight_type == "weight_v":
_UpperCAmelCase = value
elif weight_type == "bias":
_UpperCAmelCase = value
else:
_UpperCAmelCase = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def UpperCAmelCase ( A : str , A : Dict , A : Union[str, Any] ):
'''simple docstring'''
_UpperCAmelCase = []
_UpperCAmelCase = fairseq_model.state_dict()
_UpperCAmelCase = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
_UpperCAmelCase = False
if "conv_layers" in name:
load_conv_layer(
A , A , A , A , hf_model.config.feat_extract_norm == 'group' , )
_UpperCAmelCase = True
else:
for key, mapped_key in MAPPING.items():
_UpperCAmelCase = 'unispeech.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
_UpperCAmelCase = True
if "*" in mapped_key:
_UpperCAmelCase = name.split(A )[0].split('.' )[-2]
_UpperCAmelCase = mapped_key.replace('*' , A )
if "weight_g" in name:
_UpperCAmelCase = 'weight_g'
elif "weight_v" in name:
_UpperCAmelCase = 'weight_v'
elif "bias" in name:
_UpperCAmelCase = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_UpperCAmelCase = 'weight'
else:
_UpperCAmelCase = None
set_recursively(A , A , A , A , A , A )
continue
if not is_used:
unused_weights.append(A )
logger.warning(f'Unused weights: {unused_weights}' )
def UpperCAmelCase ( A : Union[str, Any] , A : List[Any] , A : Union[str, Any] , A : Optional[int] , A : int ):
'''simple docstring'''
_UpperCAmelCase = full_name.split('conv_layers.' )[-1]
_UpperCAmelCase = name.split('.' )
_UpperCAmelCase = int(items[0] )
_UpperCAmelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
_UpperCAmelCase = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
_UpperCAmelCase = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
_UpperCAmelCase = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
_UpperCAmelCase = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(A )
@torch.no_grad()
def UpperCAmelCase ( A : str , A : List[str] , A : Any=None , A : Any=None , A : Dict=True ):
'''simple docstring'''
if config_path is not None:
_UpperCAmelCase = UniSpeechConfig.from_pretrained(A )
else:
_UpperCAmelCase = UniSpeechConfig()
if is_finetuned:
if dict_path:
_UpperCAmelCase = Dictionary.load_from_json(A )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_UpperCAmelCase = target_dict.pad_index
_UpperCAmelCase = target_dict.bos_index
_UpperCAmelCase = target_dict.eos_index
_UpperCAmelCase = len(target_dict.symbols )
_UpperCAmelCase = os.path.join(A , 'vocab.json' )
if not os.path.isdir(A ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(A ) )
return
os.makedirs(A , exist_ok=A )
_UpperCAmelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
_UpperCAmelCase = 42
_UpperCAmelCase = 43
with open(A , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(A , A )
_UpperCAmelCase = WavaVecaPhonemeCTCTokenizer(
A , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=A , )
_UpperCAmelCase = True if config.feat_extract_norm == 'layer' else False
_UpperCAmelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=A , return_attention_mask=A , )
_UpperCAmelCase = WavaVecaProcessor(feature_extractor=A , tokenizer=A )
processor.save_pretrained(A )
_UpperCAmelCase = UniSpeechForCTC(A )
else:
_UpperCAmelCase = UniSpeechForPreTraining(A )
if is_finetuned:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] ), 'w2v_path': checkpoint_path} )
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
_UpperCAmelCase = model[0].eval()
recursively_load_weights(A , A , A )
hf_unispeech.save_pretrained(A )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
lowercase = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
"""simple docstring"""
import os
lowercase = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 1_00, '''D''': 5_00, '''M''': 10_00}
def UpperCAmelCase ( A : str ):
'''simple docstring'''
_UpperCAmelCase = 0
_UpperCAmelCase = 0
while index < len(A ) - 1:
_UpperCAmelCase = SYMBOLS[numerals[index]]
_UpperCAmelCase = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def UpperCAmelCase ( A : int ):
'''simple docstring'''
_UpperCAmelCase = ''
_UpperCAmelCase = num // 1000
numerals += m_count * "M"
num %= 1000
_UpperCAmelCase = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
_UpperCAmelCase = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def UpperCAmelCase ( A : str = "/p089_roman.txt" ):
'''simple docstring'''
_UpperCAmelCase = 0
with open(os.path.dirname(A ) + roman_numerals_filename ) as filea:
_UpperCAmelCase = filea.readlines()
for line in lines:
_UpperCAmelCase = line.strip()
_UpperCAmelCase = parse_roman_numerals(A )
_UpperCAmelCase = generate_roman_numerals(A )
savings += len(A ) - len(A )
return savings
if __name__ == "__main__":
print(F'''{solution() = }''')
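# Example: "XIIII" parses to 14, which regenerates minimally as "XIV",
# saving len("XIIII") - len("XIV") == 2 characters.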
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowercase = {
'''configuration_efficientformer''': [
'''EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientFormerConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''EfficientFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientFormerForImageClassification''',
'''EfficientFormerForImageClassificationWithTeacher''',
'''EfficientFormerModel''',
'''EfficientFormerPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFEfficientFormerForImageClassification''',
'''TFEfficientFormerForImageClassificationWithTeacher''',
'''TFEfficientFormerModel''',
'''TFEfficientFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = {
'task_specific_params': {
'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4},
'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4},
'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6},
}
}
_UpperCAmelCase = {
'task_specific_params.summarization.length_penalty': 1.0,
'task_specific_params.summarization.max_length': 128,
'task_specific_params.summarization.min_length': 12,
'task_specific_params.summarization.num_beams': 4,
'task_specific_params.summarization_cnn.length_penalty': 2.0,
'task_specific_params.summarization_cnn.max_length': 142,
'task_specific_params.summarization_cnn.min_length': 56,
'task_specific_params.summarization_cnn.num_beams': 4,
'task_specific_params.summarization_xsum.length_penalty': 1.0,
'task_specific_params.summarization_xsum.max_length': 62,
'task_specific_params.summarization_xsum.min_length': 11,
'task_specific_params.summarization_xsum.num_beams': 6,
}
self.assertEqual(flatten_dict(snake_case ) , snake_case )
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(snake_case ) , x.transpose() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(transpose(snake_case ) , transpose(snake_case ).numpy() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , transpose(snake_case , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(transpose(snake_case ) , transpose(snake_case ).numpy() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , transpose(snake_case , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(transpose(snake_case ) , np.asarray(transpose(snake_case ) ) ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , np.asarray(transpose(snake_case , axes=(1, 2, 0) ) ) ) )
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , np.reshape(snake_case , (4, 3) ) ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , np.reshape(snake_case , (12, 5) ) ) )
@require_torch
def lowerCamelCase_ ( self ) -> Optional[Any]:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , reshape(snake_case , (4, 3) ).numpy() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , reshape(snake_case , (12, 5) ).numpy() ) )
@require_tf
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , reshape(snake_case , (4, 3) ).numpy() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , reshape(snake_case , (12, 5) ).numpy() ) )
@require_flax
def lowerCamelCase_ ( self ) -> Tuple:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , np.asarray(reshape(snake_case , (4, 3) ) ) ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , np.asarray(reshape(snake_case , (12, 5) ) ) ) )
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(snake_case ) , np.squeeze(snake_case ) ) )
_UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , np.squeeze(snake_case , axis=2 ) ) )
@require_torch
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = np.random.randn(1 , 3 , 4 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(squeeze(snake_case ) , squeeze(snake_case ).numpy() ) )
_UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , squeeze(snake_case , axis=2 ).numpy() ) )
@require_tf
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = np.random.randn(1 , 3 , 4 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(squeeze(snake_case ) , squeeze(snake_case ).numpy() ) )
_UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , squeeze(snake_case , axis=2 ).numpy() ) )
@require_flax
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = np.random.randn(1 , 3 , 4 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(squeeze(snake_case ) , np.asarray(squeeze(snake_case ) ) ) )
_UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , np.asarray(squeeze(snake_case , axis=2 ) ) ) )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , np.expand_dims(snake_case , axis=1 ) ) )
@require_torch
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , expand_dims(snake_case , axis=1 ).numpy() ) )
@require_tf
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , expand_dims(snake_case , axis=1 ).numpy() ) )
@require_flax
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , np.asarray(expand_dims(snake_case , axis=1 ) ) ) )
"""Single-bit manipulation operations on integers."""


def set_bit(number: int, position: int) -> int:
    """Set the bit at position to 1 (e.g. set_bit(0b1101, 1) == 15)."""
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """Set the bit at position to 0 (e.g. clear_bit(0b10010, 1) == 16)."""
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """Flip the bit at position (e.g. flip_bit(0b101, 1) == 7)."""
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at position is 1 (e.g. is_bit_set(0b1010, 3) is True)."""
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """Return the bit (0 or 1) at position (e.g. get_bit(0b1010, 1) == 1)."""
    return int((number & (1 << position)) != 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
import os
def UpperCAmelCase ( ):
'''simple docstring'''
_UpperCAmelCase = os.path.join(os.path.dirname(A ) , 'num.txt' )
with open(A ) as file_hand:
return str(sum(int(A ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
"""simple docstring"""
def UpperCAmelCase ( A : Optional[Any] , A : Optional[int] , A : Dict , A : Dict , A : Optional[Any] , A : Tuple ):
'''simple docstring'''
if index == r:
for j in range(A ):
print(data[j] , end=' ' )
print(' ' )
return
# When no more elements are there to put in data[]
if i >= n:
return
# current is included, put next at next location
_UpperCAmelCase = arr[i]
combination_util(A , A , A , index + 1 , A , i + 1 )
# current is excluded, replace it with
# next (Note that i+1 is passed, but
# index is not changed)
combination_util(A , A , A , A , A , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def UpperCAmelCase ( A : Any , A : str , A : Optional[int] ):
'''simple docstring'''
_UpperCAmelCase = [0] * r
# Print all combination using temporary array 'data[]'
combination_util(A , A , A , 0 , A , 0 )
if __name__ == "__main__":
# Driver code to check the function above
lowercase = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
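# The driver prints the ten 3-element combinations of [10, 20, 30, 40, 50]
# in index order, starting with "10 20 30" and ending with "30 40 50".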
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase = {
'''configuration_roberta''': ['''ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RobertaConfig''', '''RobertaOnnxConfig'''],
'''tokenization_roberta''': ['''RobertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''RobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaForCausalLM''',
'''RobertaForMaskedLM''',
'''RobertaForMultipleChoice''',
'''RobertaForQuestionAnswering''',
'''RobertaForSequenceClassification''',
'''RobertaForTokenClassification''',
'''RobertaModel''',
'''RobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaForCausalLM''',
'''TFRobertaForMaskedLM''',
'''TFRobertaForMultipleChoice''',
'''TFRobertaForQuestionAnswering''',
'''TFRobertaForSequenceClassification''',
'''TFRobertaForTokenClassification''',
'''TFRobertaMainLayer''',
'''TFRobertaModel''',
'''TFRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''FlaxRobertaForCausalLM''',
'''FlaxRobertaForMaskedLM''',
'''FlaxRobertaForMultipleChoice''',
'''FlaxRobertaForQuestionAnswering''',
'''FlaxRobertaForSequenceClassification''',
'''FlaxRobertaForTokenClassification''',
'''FlaxRobertaModel''',
'''FlaxRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase = {
'''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
'''tokenization_mvp''': ['''MvpTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''MvpTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
lowercase = logging.get_logger(__name__)
class lowercase__ ( A ):
'''simple docstring'''
def __init__( self , *snake_case , **snake_case ) -> None:
warnings.warn(
'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use YolosImageProcessor instead.' , snake_case , )
super().__init__(*snake_case , **snake_case )
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class lowercase__ ( nn.Module ):
'''simple docstring'''
def __init__( self , snake_case = 16 , snake_case = 88 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = 32 , snake_case = None , snake_case = False , snake_case = None , snake_case = None , snake_case = "geglu" , snake_case = None , ) -> str:
super().__init__()
_UpperCAmelCase = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=snake_case , attention_head_dim=snake_case , in_channels=snake_case , num_layers=snake_case , dropout=snake_case , norm_num_groups=snake_case , cross_attention_dim=snake_case , attention_bias=snake_case , sample_size=snake_case , num_vector_embeds=snake_case , activation_fn=snake_case , num_embeds_ada_norm=snake_case , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
_UpperCAmelCase = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
_UpperCAmelCase = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
_UpperCAmelCase = [1, 0]
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case=None , snake_case=None , snake_case=None , snake_case = True , ) -> Any:
_UpperCAmelCase = hidden_states
_UpperCAmelCase = []
_UpperCAmelCase = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
_UpperCAmelCase = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
_UpperCAmelCase = self.transformer_index_for_condition[i]
_UpperCAmelCase = self.transformers[transformer_index](
snake_case , encoder_hidden_states=snake_case , timestep=snake_case , cross_attention_kwargs=snake_case , return_dict=snake_case , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
_UpperCAmelCase = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
_UpperCAmelCase = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=snake_case )
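# Note on the mixing step above: each of the two transformers sees its own
# slice of `encoder_hidden_states` (the first 77 tokens, then the next 257),
# contributes a residual (encoded_state - input_states), and the residuals are
# blended with `mix_ratio` before the original input is added back. A pipeline
# can retune `mix_ratio`, `condition_lengths`, and
# `transformer_index_for_condition` without touching this module.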
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
'''microsoft/beit-base-patch16-224-pt22k''': (
'''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'''
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class lowercase__ ( A ):
'''simple docstring'''
_UpperCAmelCase = '''beit'''
def __init__( self , snake_case=8192 , snake_case=768 , snake_case=12 , snake_case=12 , snake_case=3072 , snake_case="gelu" , snake_case=0.0 , snake_case=0.0 , snake_case=0.02 , snake_case=1E-12 , snake_case=224 , snake_case=16 , snake_case=3 , snake_case=False , snake_case=False , snake_case=False , snake_case=False , snake_case=0.1 , snake_case=0.1 , snake_case=True , snake_case=[3, 5, 7, 11] , snake_case=[1, 2, 3, 6] , snake_case=True , snake_case=0.4 , snake_case=256 , snake_case=1 , snake_case=False , snake_case=255 , **snake_case , ) -> str:
super().__init__(**snake_case )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = use_mask_token
_UpperCAmelCase = use_absolute_position_embeddings
_UpperCAmelCase = use_relative_position_bias
_UpperCAmelCase = use_shared_relative_position_bias
_UpperCAmelCase = layer_scale_init_value
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = use_mean_pooling
# decode head attributes (semantic segmentation)
_UpperCAmelCase = out_indices
_UpperCAmelCase = pool_scales
# auxiliary head attributes (semantic segmentation)
_UpperCAmelCase = use_auxiliary_head
_UpperCAmelCase = auxiliary_loss_weight
_UpperCAmelCase = auxiliary_channels
_UpperCAmelCase = auxiliary_num_convs
_UpperCAmelCase = auxiliary_concat_input
_UpperCAmelCase = semantic_loss_ignore_index
class lowercase__ ( A ):
'''simple docstring'''
_UpperCAmelCase = version.parse('''1.11''' )
@property
def lowerCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def lowerCamelCase_ ( self ) -> float:
return 1E-4
| 24 | 0 |
"""simple docstring"""
from collections import deque
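# Tarjan's algorithm: find the strongly connected components (SCCs) of a
# directed graph with a single depth-first search, in O(V + E) time.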
def UpperCAmelCase ( A : Optional[int] ):
'''simple docstring'''
_UpperCAmelCase = len(A )
_UpperCAmelCase = deque()
_UpperCAmelCase = [False for _ in range(A )]
_UpperCAmelCase = [-1 for _ in range(A )]
_UpperCAmelCase = index_of[:]
def strong_connect(A : List[Any] , A : List[str] , A : Optional[int] ):
_UpperCAmelCase = index # the number when this node is seen
_UpperCAmelCase = index # lowest rank node reachable from here
index += 1
stack.append(A )
_UpperCAmelCase = True
for w in g[v]:
if index_of[w] == -1:
_UpperCAmelCase = strong_connect(A , A , A )
_UpperCAmelCase = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
_UpperCAmelCase = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
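        # v is the root of an SCC when its lowlink equals its own index;
        # pop the stack down to v to collect that component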
if lowlink_of[v] == index_of[v]:
_UpperCAmelCase = []
_UpperCAmelCase = stack.pop()
_UpperCAmelCase = False
component.append(A )
while w != v:
_UpperCAmelCase = stack.pop()
_UpperCAmelCase = False
component.append(A )
components.append(A )
return index
_UpperCAmelCase = []
for v in range(A ):
if index_of[v] == -1:
strong_connect(A , 0 , A )
return components
def UpperCAmelCase ( A : List[Any] , A : Optional[Any] ):
'''simple docstring'''
_UpperCAmelCase = [[] for _ in range(A )]
for u, v in edges:
g[u].append(A )
return g
if __name__ == "__main__":
# Test
lowercase = 7
lowercase = [0, 0, 1, 2, 3, 3, 4, 4, 6]
lowercase = [1, 3, 2, 0, 1, 4, 5, 6, 5]
lowercase = [(u, v) for u, v in zip(source, target)]
lowercase = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 701 |
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
lowercase = logging.getLogger(__name__)
if __name__ == "__main__":
lowercase = argparse.ArgumentParser(
description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
)
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
parser.add_argument('''--vocab_size''', default=3_05_22, type=int)
lowercase = parser.parse_args()
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file, '''rb''') as fp:
lowercase = pickle.load(fp)
logger.info('''Counting occurrences for MLM.''')
lowercase = Counter()
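    # accumulate occurrence counts per token id across the whole binarized corpus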
for tk_ids in data:
counter.update(tk_ids)
lowercase = [0] * args.vocab_size
for k, v in counter.items():
lowercase = v
logger.info(F'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, '''wb''') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 24 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowercase__ ( A, A, unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = StableDiffusionSAGPipeline
_UpperCAmelCase = TEXT_TO_IMAGE_PARAMS
_UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
_UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
_UpperCAmelCase = False
def lowerCamelCase_ ( self ) -> Tuple:
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
_UpperCAmelCase = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=snake_case , set_alpha_to_one=snake_case , )
torch.manual_seed(0 )
_UpperCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
_UpperCAmelCase = CLIPTextModel(snake_case )
_UpperCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_UpperCAmelCase = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def lowerCamelCase_ ( self , snake_case , snake_case=0 ) -> Tuple:
if str(snake_case ).startswith('mps' ):
_UpperCAmelCase = torch.manual_seed(snake_case )
else:
_UpperCAmelCase = torch.Generator(device=snake_case ).manual_seed(snake_case )
_UpperCAmelCase = {
'prompt': '.',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 1.0,
'sag_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def lowerCamelCase_ ( self ) -> str:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase_ ( self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self ) -> Optional[Any]:
_UpperCAmelCase = StableDiffusionSAGPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
_UpperCAmelCase = sag_pipe.to(snake_case )
sag_pipe.set_progress_bar_config(disable=snake_case )
_UpperCAmelCase = '.'
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = sag_pipe(
[prompt] , generator=snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' )
_UpperCAmelCase = output.images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_UpperCAmelCase = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def lowerCamelCase_ ( self ) -> Optional[Any]:
_UpperCAmelCase = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
_UpperCAmelCase = sag_pipe.to(snake_case )
sag_pipe.set_progress_bar_config(disable=snake_case )
_UpperCAmelCase = '.'
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = sag_pipe(
[prompt] , generator=snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' )
_UpperCAmelCase = output.images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_UpperCAmelCase = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def lowerCamelCase_ ( self ) -> Optional[Any]:
_UpperCAmelCase = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
_UpperCAmelCase = sag_pipe.to(snake_case )
sag_pipe.set_progress_bar_config(disable=snake_case )
_UpperCAmelCase = '.'
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = sag_pipe(
[prompt] , width=768 , height=512 , generator=snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' , )
_UpperCAmelCase = output.images
assert image.shape == (1, 512, 768, 3)
| 702 |
"""simple docstring"""
from itertools import permutations
def UpperCAmelCase ( A : tuple ):
'''simple docstring'''
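    # Project Euler 43: in a 0-9 pandigital number, each 3-digit substring
    # d2d3d4 ... d8d9d10 must be divisible by 2, 3, 5, 7, 11, 13, 17 in turn.
    # The first three checks below use the cheap digit rules for 2, 3 and 5.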
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
_UpperCAmelCase = [7, 11, 13, 17]
for i, test in enumerate(A ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def UpperCAmelCase ( A : int = 10 ):
'''simple docstring'''
return sum(
int(''.join(map(A , A ) ) )
for num in permutations(range(A ) )
if is_substring_divisible(A ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 24 | 0 |
"""simple docstring"""
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
lowercase = logging.getLogger()
def UpperCAmelCase ( A : Path , A : list ):
'''simple docstring'''
_UpperCAmelCase = '\n'.join(A )
Path(A ).open('w' ).writelines(A )
lowercase = '''patrickvonplaten/t5-tiny-random'''
lowercase = '''sshleifer/bart-tiny-random'''
lowercase = '''sshleifer/tiny-mbart'''
lowercase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class lowercase__ ( A ):
'''simple docstring'''
def lowerCamelCase_ ( self , snake_case ) -> str:
_UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
_UpperCAmelCase = input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
_UpperCAmelCase = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.']
_dump_articles(snake_case , snake_case )
_UpperCAmelCase = str(Path(self.get_auto_remove_tmp_dir() ) / 'scores.json' )
_UpperCAmelCase = 'translation_en_to_de' if model == T5_TINY else 'summarization'
_UpperCAmelCase = f'\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n '.split()
with patch.object(snake_case , 'argv' , snake_case ):
run_generate()
assert Path(snake_case ).exists()
# os.remove(Path(output_file_name))
def lowerCamelCase_ ( self ) -> str:
self.run_eval_tester(snake_case )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def lowerCamelCase_ ( self , snake_case ) -> List[Any]:
self.run_eval_tester(snake_case )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def lowerCamelCase_ ( self , snake_case ) -> Dict:
_UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
_UpperCAmelCase = input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
_UpperCAmelCase = {
'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'],
'de': [
                'Maschinelles Lernen ist großartig, oder?',
'Ich esse gerne Bananen',
'Morgen ist wieder ein toller Tag!',
],
}
_UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() )
_UpperCAmelCase = str(tmp_dir / 'scores.json' )
_UpperCAmelCase = str(tmp_dir / 'val.target' )
_dump_articles(snake_case , text['en'] )
_dump_articles(snake_case , text['de'] )
_UpperCAmelCase = 'translation_en_to_de' if model == T5_TINY else 'summarization'
_UpperCAmelCase = f'\n run_eval_search.py\n {model}\n {str(snake_case )}\n {str(snake_case )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n '.split()
testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'] )
with patch.object(snake_case , 'argv' , snake_case ):
with CaptureStdout() as cs:
run_search()
_UpperCAmelCase = [' num_beams | length_penalty', model, 'Best score args']
_UpperCAmelCase = ['Info']
if "translation" in task:
expected_strings.append('bleu' )
else:
expected_strings.extend(snake_case )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(snake_case ).exists()
os.remove(Path(snake_case ) )
| 703 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase = {
'''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
'''tokenization_mvp''': ['''MvpTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''MvpTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 24 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = '''▁'''
lowercase = {'''vocab_file''': '''sentencepiece.bpe.model'''}
lowercase = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
}
}
lowercase = {
'''facebook/mbart-large-en-ro''': 10_24,
'''facebook/mbart-large-cc25''': 10_24,
}
# fmt: off
lowercase = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class lowercase__ ( A ):
'''simple docstring'''
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = ['''input_ids''', '''attention_mask''']
_UpperCAmelCase = []
_UpperCAmelCase = []
def __init__( self , snake_case , snake_case="<s>" , snake_case="</s>" , snake_case="</s>" , snake_case="<s>" , snake_case="<unk>" , snake_case="<pad>" , snake_case="<mask>" , snake_case=None , snake_case=None , snake_case=None , snake_case = None , snake_case=None , **snake_case , ) -> Tuple:
# Mask token behave like a normal word, i.e. include the space before it
_UpperCAmelCase = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else mask_token
_UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=snake_case , eos_token=snake_case , unk_token=snake_case , sep_token=snake_case , cls_token=snake_case , pad_token=snake_case , mask_token=snake_case , tokenizer_file=snake_case , src_lang=snake_case , tgt_lang=snake_case , additional_special_tokens=snake_case , sp_model_kwargs=self.sp_model_kwargs , **snake_case , )
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(snake_case ) )
_UpperCAmelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
        # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
_UpperCAmelCase = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_UpperCAmelCase = 1
_UpperCAmelCase = len(self.sp_model )
_UpperCAmelCase = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(snake_case )
}
_UpperCAmelCase = {v: k for k, v in self.lang_code_to_id.items()}
_UpperCAmelCase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
_UpperCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
_UpperCAmelCase = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
_UpperCAmelCase = src_lang if src_lang is not None else 'en_XX'
_UpperCAmelCase = self.lang_code_to_id[self._src_lang]
_UpperCAmelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ) -> Optional[Any]:
_UpperCAmelCase = self.__dict__.copy()
_UpperCAmelCase = None
_UpperCAmelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , snake_case ) -> str:
_UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
_UpperCAmelCase = {}
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def lowerCamelCase_ ( self ) -> List[Any]:
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def lowerCamelCase_ ( self ) -> str:
return self._src_lang
@src_lang.setter
def lowerCamelCase_ ( self , snake_case ) -> None:
_UpperCAmelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def lowerCamelCase_ ( self , snake_case , snake_case = None , snake_case = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case , token_ids_a=snake_case , already_has_special_tokens=snake_case )
_UpperCAmelCase = [1] * len(self.prefix_tokens )
_UpperCAmelCase = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(snake_case )) + suffix_ones
return prefix_ones + ([0] * len(snake_case )) + ([0] * len(snake_case )) + suffix_ones
def lowerCamelCase_ ( self , snake_case , snake_case = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowerCamelCase_ ( self , snake_case , snake_case = None ) -> List[int]:
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case , **snake_case ) -> Dict:
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
_UpperCAmelCase = src_lang
_UpperCAmelCase = self(snake_case , add_special_tokens=snake_case , return_tensors=snake_case , **snake_case )
_UpperCAmelCase = self.convert_tokens_to_ids(snake_case )
_UpperCAmelCase = tgt_lang_id
return inputs
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = {self.convert_ids_to_tokens(snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self , snake_case ) -> List[str]:
return self.sp_model.encode(snake_case , out_type=snake_case )
def lowerCamelCase_ ( self , snake_case ) -> List[str]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_UpperCAmelCase = self.sp_model.PieceToId(snake_case )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowerCamelCase_ ( self , snake_case ) -> Any:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowerCamelCase_ ( self , snake_case ) -> Optional[Any]:
_UpperCAmelCase = ''.join(snake_case ).replace(snake_case , ' ' ).strip()
return out_string
def lowerCamelCase_ ( self , snake_case , snake_case = None ) -> Tuple[str]:
if not os.path.isdir(snake_case ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_UpperCAmelCase = os.path.join(
snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case , 'wb' ) as fi:
_UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(snake_case )
return (out_vocab_file,)
def lowerCamelCase_ ( self , snake_case , snake_case = "en_XX" , snake_case = None , snake_case = "ro_RO" , **snake_case , ) -> BatchEncoding:
_UpperCAmelCase = src_lang
_UpperCAmelCase = tgt_lang
return super().prepare_seqaseq_batch(snake_case , snake_case , **snake_case )
def lowerCamelCase_ ( self ) -> str:
return self.set_src_lang_special_tokens(self.src_lang )
def lowerCamelCase_ ( self ) -> str:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowerCamelCase_ ( self , snake_case ) -> None:
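        # mBART source format: [src tokens] </s> <src_lang_code>
        # (no prefix tokens; the language code is appended after eos)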
_UpperCAmelCase = self.lang_code_to_id[src_lang]
_UpperCAmelCase = []
_UpperCAmelCase = [self.eos_token_id, self.cur_lang_code]
def lowerCamelCase_ ( self , snake_case ) -> None:
_UpperCAmelCase = self.lang_code_to_id[lang]
_UpperCAmelCase = []
_UpperCAmelCase = [self.eos_token_id, self.cur_lang_code]
| 704 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 24 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase__ ( A, unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = KandinskyImgaImgPipeline
_UpperCAmelCase = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''']
_UpperCAmelCase = [
'''prompt''',
'''negative_prompt''',
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
]
_UpperCAmelCase = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''negative_prompt''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
_UpperCAmelCase = False
@property
def lowerCamelCase_ ( self ) -> List[Any]:
return 32
@property
def lowerCamelCase_ ( self ) -> Dict:
return 32
@property
def lowerCamelCase_ ( self ) -> Optional[Any]:
return self.time_input_dim
@property
def lowerCamelCase_ ( self ) -> List[Any]:
return self.time_input_dim * 4
@property
def lowerCamelCase_ ( self ) -> Any:
return 100
@property
def lowerCamelCase_ ( self ) -> List[Any]:
_UpperCAmelCase = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
return tokenizer
@property
def lowerCamelCase_ ( self ) -> List[Any]:
torch.manual_seed(0 )
_UpperCAmelCase = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
_UpperCAmelCase = MultilingualCLIP(snake_case )
_UpperCAmelCase = text_encoder.eval()
return text_encoder
@property
def lowerCamelCase_ ( self ) -> Optional[Any]:
torch.manual_seed(0 )
_UpperCAmelCase = {
'in_channels': 4,
            # Out channels is double the in channels because the model predicts both mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
_UpperCAmelCase = UNetaDConditionModel(**snake_case )
return model
@property
def lowerCamelCase_ ( self ) -> int:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCamelCase_ ( self ) -> Optional[Any]:
torch.manual_seed(0 )
_UpperCAmelCase = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCamelCase_ ( self ) -> List[Any]:
_UpperCAmelCase = self.dummy_text_encoder
_UpperCAmelCase = self.dummy_tokenizer
_UpperCAmelCase = self.dummy_unet
_UpperCAmelCase = self.dummy_movq
_UpperCAmelCase = {
'num_train_timesteps': 1000,
'beta_schedule': 'linear',
'beta_start': 0.00085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
_UpperCAmelCase = DDIMScheduler(**snake_case )
_UpperCAmelCase = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def lowerCamelCase_ ( self , snake_case , snake_case=0 ) -> List[str]:
_UpperCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(snake_case ) ).to(snake_case )
_UpperCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(snake_case )
# create init_image
_UpperCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case ) ).to(snake_case )
_UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_UpperCAmelCase = Image.fromarray(np.uinta(snake_case ) ).convert('RGB' ).resize((256, 256) )
if str(snake_case ).startswith('mps' ):
_UpperCAmelCase = torch.manual_seed(snake_case )
else:
_UpperCAmelCase = torch.Generator(device=snake_case ).manual_seed(snake_case )
_UpperCAmelCase = {
'prompt': 'horse',
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 10,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def lowerCamelCase_ ( self ) -> Optional[Any]:
_UpperCAmelCase = 'cpu'
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = self.pipeline_class(**snake_case )
_UpperCAmelCase = pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
_UpperCAmelCase = pipe(**self.get_dummy_inputs(snake_case ) )
_UpperCAmelCase = output.images
_UpperCAmelCase = pipe(
**self.get_dummy_inputs(snake_case ) , return_dict=snake_case , )[0]
_UpperCAmelCase = image[0, -3:, -3:, -1]
_UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase = np.array(
[0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase_ ( self ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_img2img_frog.npy' )
_UpperCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
_UpperCAmelCase = 'A red cartoon frog, 4k'
_UpperCAmelCase = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.floataa )
pipe_prior.to(snake_case )
_UpperCAmelCase = KandinskyImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1' , torch_dtype=torch.floataa )
_UpperCAmelCase = pipeline.to(snake_case )
pipeline.set_progress_bar_config(disable=snake_case )
_UpperCAmelCase = torch.Generator(device='cpu' ).manual_seed(0 )
_UpperCAmelCase , _UpperCAmelCase = pipe_prior(
snake_case , generator=snake_case , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
_UpperCAmelCase = pipeline(
snake_case , image=snake_case , image_embeds=snake_case , negative_image_embeds=snake_case , generator=snake_case , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='np' , )
_UpperCAmelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(snake_case , snake_case )
| 705 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowercase = logging.get_logger(__name__)
lowercase = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class lowercase__ ( A, A ):
'''simple docstring'''
_UpperCAmelCase = '''swin'''
_UpperCAmelCase = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , snake_case=224 , snake_case=4 , snake_case=3 , snake_case=96 , snake_case=[2, 2, 6, 2] , snake_case=[3, 6, 12, 24] , snake_case=7 , snake_case=4.0 , snake_case=True , snake_case=0.0 , snake_case=0.0 , snake_case=0.1 , snake_case="gelu" , snake_case=False , snake_case=0.02 , snake_case=1E-5 , snake_case=32 , snake_case=None , snake_case=None , **snake_case , ) -> List[Any]:
super().__init__(**snake_case )
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = embed_dim
_UpperCAmelCase = depths
_UpperCAmelCase = len(snake_case )
_UpperCAmelCase = num_heads
_UpperCAmelCase = window_size
_UpperCAmelCase = mlp_ratio
_UpperCAmelCase = qkv_bias
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = hidden_act
_UpperCAmelCase = use_absolute_embeddings
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = initializer_range
_UpperCAmelCase = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_UpperCAmelCase = int(embed_dim * 2 ** (len(snake_case ) - 1) )
_UpperCAmelCase = ['stem'] + [f'stage{idx}' for idx in range(1 , len(snake_case ) + 1 )]
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(
out_features=snake_case , out_indices=snake_case , stage_names=self.stage_names )
class lowercase__ ( A ):
'''simple docstring'''
_UpperCAmelCase = version.parse('''1.11''' )
@property
def lowerCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def lowerCamelCase_ ( self ) -> float:
return 1E-4
| 24 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = StableDiffusionLDMaDPipeline
_UpperCAmelCase = TEXT_TO_IMAGE_PARAMS
_UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
def lowerCamelCase_ ( self ) -> str:
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
_UpperCAmelCase = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=snake_case , set_alpha_to_one=snake_case , )
torch.manual_seed(0 )
_UpperCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
_UpperCAmelCase = CLIPTextModel(snake_case )
_UpperCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_UpperCAmelCase = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def lowerCamelCase_ ( self , snake_case , snake_case=0 ) -> Dict:
if str(snake_case ).startswith('mps' ):
_UpperCAmelCase = torch.manual_seed(snake_case )
else:
_UpperCAmelCase = torch.Generator(device=snake_case ).manual_seed(snake_case )
_UpperCAmelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = StableDiffusionLDMaDPipeline(**snake_case )
_UpperCAmelCase = ldmad_pipe.to(snake_case )
ldmad_pipe.set_progress_bar_config(disable=snake_case )
_UpperCAmelCase = self.get_dummy_inputs(snake_case )
_UpperCAmelCase = ldmad_pipe(**snake_case )
_UpperCAmelCase , _UpperCAmelCase = output.rgb, output.depth
_UpperCAmelCase = rgb[0, -3:, -3:, -1]
_UpperCAmelCase = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
_UpperCAmelCase = np.array(
[0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262] )
_UpperCAmelCase = np.array([103.46727, 85.812004, 87.849236] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1E-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1E-2
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = StableDiffusionLDMaDPipeline(**snake_case )
_UpperCAmelCase = ldmad_pipe.to(snake_case )
ldmad_pipe.set_progress_bar_config(disable=snake_case )
_UpperCAmelCase = self.get_dummy_inputs(snake_case )
_UpperCAmelCase = 3 * [inputs['prompt']]
# forward
_UpperCAmelCase = ldmad_pipe(**snake_case )
_UpperCAmelCase , _UpperCAmelCase = output.rgb, output.depth
_UpperCAmelCase = rgb_slice_a[0, -3:, -3:, -1]
_UpperCAmelCase = depth_slice_a[0, -3:, -1]
_UpperCAmelCase = self.get_dummy_inputs(snake_case )
_UpperCAmelCase = 3 * [inputs.pop('prompt' )]
_UpperCAmelCase = ldmad_pipe.tokenizer(
snake_case , padding='max_length' , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=snake_case , return_tensors='pt' , )
_UpperCAmelCase = text_inputs['input_ids'].to(snake_case )
_UpperCAmelCase = ldmad_pipe.text_encoder(snake_case )[0]
_UpperCAmelCase = prompt_embeds
# forward
_UpperCAmelCase = ldmad_pipe(**snake_case )
_UpperCAmelCase , _UpperCAmelCase = output.rgb, output.depth
_UpperCAmelCase = rgb_slice_a[0, -3:, -3:, -1]
_UpperCAmelCase = depth_slice_a[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1E-4
assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1E-4
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = PNDMScheduler(skip_prk_steps=snake_case )
_UpperCAmelCase = StableDiffusionLDMaDPipeline(**snake_case )
_UpperCAmelCase = ldmad_pipe.to(snake_case )
ldmad_pipe.set_progress_bar_config(disable=snake_case )
_UpperCAmelCase = self.get_dummy_inputs(snake_case )
_UpperCAmelCase = 'french fries'
_UpperCAmelCase = ldmad_pipe(**snake_case , negative_prompt=snake_case )
_UpperCAmelCase , _UpperCAmelCase = output.rgb, output.depth
_UpperCAmelCase = rgb[0, -3:, -3:, -1]
_UpperCAmelCase = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
_UpperCAmelCase = np.array(
[0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217] )
_UpperCAmelCase = np.array([107.84738, 84.62802, 89.962135] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1E-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1E-2
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase_ ( self ) -> Optional[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self , snake_case , snake_case="cpu" , snake_case=torch.floataa , snake_case=0 ) -> Union[str, Any]:
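        # seed both the generator and the initial latents so the slow test
        # produces reproducible slices across runs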
_UpperCAmelCase = torch.Generator(device=snake_case ).manual_seed(snake_case )
_UpperCAmelCase = np.random.RandomState(snake_case ).standard_normal((1, 4, 64, 64) )
_UpperCAmelCase = torch.from_numpy(snake_case ).to(device=snake_case , dtype=snake_case )
_UpperCAmelCase = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d' )
_UpperCAmelCase = ldmad_pipe.to(snake_case )
ldmad_pipe.set_progress_bar_config(disable=snake_case )
_UpperCAmelCase = self.get_inputs(snake_case )
_UpperCAmelCase = ldmad_pipe(**snake_case )
_UpperCAmelCase , _UpperCAmelCase = output.rgb, output.depth
_UpperCAmelCase = rgb[0, -3:, -3:, -1].flatten()
_UpperCAmelCase = rgb[0, -3:, -1].flatten()
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512)
_UpperCAmelCase = np.array(
[0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706] )
_UpperCAmelCase = np.array(
[0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3E-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3E-3
@nightly
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase_ ( self ) -> int:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self , snake_case , snake_case="cpu" , snake_case=torch.floataa , snake_case=0 ) -> List[str]:
_UpperCAmelCase = torch.Generator(device=snake_case ).manual_seed(snake_case )
_UpperCAmelCase = np.random.RandomState(snake_case ).standard_normal((1, 4, 64, 64) )
_UpperCAmelCase = torch.from_numpy(snake_case ).to(device=snake_case , dtype=snake_case )
_UpperCAmelCase = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 50,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d' ).to(snake_case )
ldmad_pipe.set_progress_bar_config(disable=snake_case )
_UpperCAmelCase = self.get_inputs(snake_case )
_UpperCAmelCase = ldmad_pipe(**snake_case )
_UpperCAmelCase , _UpperCAmelCase = output.rgb, output.depth
_UpperCAmelCase = 0.495586
_UpperCAmelCase = 0.33795515
_UpperCAmelCase = 112.48518
_UpperCAmelCase = 98.489746
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d-4c' ).to(snake_case )
ldmad_pipe.set_progress_bar_config(disable=snake_case )
_UpperCAmelCase = self.get_inputs(snake_case )
_UpperCAmelCase = ldmad_pipe(**snake_case )
_UpperCAmelCase , _UpperCAmelCase = output.rgb, output.depth
_UpperCAmelCase = 0.4194127
_UpperCAmelCase = 0.35375586
_UpperCAmelCase = 0.5638502
_UpperCAmelCase = 0.34686103
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
| 706 |
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class lowercase__ ( nn.Module ):
'''simple docstring'''
def __init__( self , snake_case = 16 , snake_case = 88 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = 32 , snake_case = None , snake_case = False , snake_case = None , snake_case = None , snake_case = "geglu" , snake_case = None , ) -> str:
super().__init__()
_UpperCAmelCase = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=snake_case , attention_head_dim=snake_case , in_channels=snake_case , num_layers=snake_case , dropout=snake_case , norm_num_groups=snake_case , cross_attention_dim=snake_case , attention_bias=snake_case , sample_size=snake_case , num_vector_embeds=snake_case , activation_fn=snake_case , num_embeds_ada_norm=snake_case , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
_UpperCAmelCase = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
_UpperCAmelCase = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
_UpperCAmelCase = [1, 0]
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case=None , snake_case=None , snake_case=None , snake_case = True , ) -> Any:
_UpperCAmelCase = hidden_states
_UpperCAmelCase = []
_UpperCAmelCase = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
_UpperCAmelCase = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
_UpperCAmelCase = self.transformer_index_for_condition[i]
_UpperCAmelCase = self.transformers[transformer_index](
snake_case , encoder_hidden_states=snake_case , timestep=snake_case , cross_attention_kwargs=snake_case , return_dict=snake_case , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
_UpperCAmelCase = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
_UpperCAmelCase = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=snake_case )
| 24 | 0 |
"""simple docstring"""
from __future__ import annotations
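# Sieve of Eratosthenes over [0, 1_000_000]: seive[k] is True iff k is prime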
lowercase = [True] * 1_00_00_01
lowercase = 2
while i * i <= 1_00_00_00:
if seive[i]:
for j in range(i * i, 1_00_00_01, i):
lowercase = False
i += 1
def UpperCAmelCase ( A : int ):
'''simple docstring'''
return seive[n]
def UpperCAmelCase ( A : int ):
'''simple docstring'''
return any(digit in '02468' for digit in str(A ) )
def UpperCAmelCase ( A : int = 100_0000 ):
'''simple docstring'''
_UpperCAmelCase = [2] # result already includes the number 2.
for num in range(3 , limit + 1 , 2 ):
if is_prime(A ) and not contains_an_even_digit(A ):
_UpperCAmelCase = str(A )
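        # all left rotations of the digit string, e.g. 197 -> 971 -> 719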
_UpperCAmelCase = [int(str_num[j:] + str_num[:j] ) for j in range(len(A ) )]
if all(is_prime(A ) for i in list_nums ):
result.append(A )
return result
def UpperCAmelCase ( ):
'''simple docstring'''
return len(find_circular_primes() )
if __name__ == "__main__":
print(F'''{len(find_circular_primes()) = }''')
| 707 |
"""simple docstring"""
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase__ ( A ):
'''simple docstring'''
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(snake_case , 'embed_dim' ) )
self.parent.assertTrue(hasattr(snake_case , 'num_heads' ) )
class lowercase__ :
'''simple docstring'''
def __init__( self , snake_case , snake_case=13 , snake_case=64 , snake_case=3 , snake_case=[16, 48, 96] , snake_case=[1, 3, 6] , snake_case=[1, 2, 10] , snake_case=[7, 3, 3] , snake_case=[4, 2, 2] , snake_case=[2, 1, 1] , snake_case=[2, 2, 2] , snake_case=[False, False, True] , snake_case=[0.0, 0.0, 0.0] , snake_case=0.02 , snake_case=1E-12 , snake_case=True , snake_case=True , snake_case=2 , ) -> Tuple:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_sizes
_UpperCAmelCase = patch_stride
_UpperCAmelCase = patch_padding
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_channels
_UpperCAmelCase = embed_dim
_UpperCAmelCase = num_heads
_UpperCAmelCase = stride_kv
_UpperCAmelCase = depth
_UpperCAmelCase = cls_token
_UpperCAmelCase = attention_drop_rate
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self ) -> List[str]:
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Optional[int]:
_UpperCAmelCase = CvtModel(config=snake_case )
model.to(snake_case )
model.eval()
_UpperCAmelCase = model(snake_case )
_UpperCAmelCase = (self.image_size, self.image_size)
_UpperCAmelCase , _UpperCAmelCase = image_size[0], image_size[1]
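        # each conv-embedding stage shrinks the spatial size:
        # floor((size + 2 * padding - kernel_size) / stride) + 1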
for i in range(len(self.depth ) ):
_UpperCAmelCase = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
_UpperCAmelCase = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Optional[Any]:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = CvtForImageClassification(snake_case )
model.to(snake_case )
model.eval()
_UpperCAmelCase = model(snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowercase__ ( A, A, unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
_UpperCAmelCase = (
{'''feature-extraction''': CvtModel, '''image-classification''': CvtForImageClassification}
if is_torch_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = CvtModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=37 )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase_ ( self ) -> Union[str, Any]:
return
@unittest.skip(reason='Cvt does not output attentions' )
def lowerCamelCase_ ( self ) -> str:
pass
@unittest.skip(reason='Cvt does not use inputs_embeds' )
def lowerCamelCase_ ( self ) -> int:
pass
@unittest.skip(reason='Cvt does not support input and output embeddings' )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
pass
def lowerCamelCase_ ( self ) -> Any:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(snake_case )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case )
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def lowerCamelCase_ ( self ) -> Optional[int]:
def check_hidden_states_output(snake_case , snake_case , snake_case ):
_UpperCAmelCase = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(snake_case , snake_case ) )
_UpperCAmelCase = outputs.hidden_states
_UpperCAmelCase = len(self.model_tester.depth )
self.assertEqual(len(snake_case ) , snake_case )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = True
check_hidden_states_output(snake_case , snake_case , snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
check_hidden_states_output(snake_case , snake_case , snake_case )
def lowerCamelCase_ ( self ) -> Any:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCamelCase_ ( self ) -> Dict:
pass
@slow
def lowerCamelCase_ ( self ) -> Dict:
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = CvtModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def UpperCAmelCase ( ):
'''simple docstring'''
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCamelCase_ ( self ) -> List[Any]:
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(snake_case )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=snake_case , return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**snake_case )
# verify the logits
_UpperCAmelCase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , snake_case )
_UpperCAmelCase = torch.tensor([0.9285, 0.9015, -0.3150] ).to(snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1E-4 ) )
| 24 | 0 |
"""simple docstring"""
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    '''pytest hook: register the shared diffusers test CLI options (e.g. `--make-reports`).'''
    from diffusers.utils.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    '''pytest hook: emit the detailed per-suite test reports at session end when requested.'''
    from diffusers.utils.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('--make-reports')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 708 |
"""simple docstring"""
from __future__ import annotations
from cmath import sqrt
def UpperCAmelCase ( A : int , A : int , A : int ):
'''simple docstring'''
if a == 0:
raise ValueError('Coefficient \'a\' must not be zero.' )
_UpperCAmelCase = b * b - 4 * a * c
_UpperCAmelCase = (-b + sqrt(A )) / (2 * a)
_UpperCAmelCase = (-b - sqrt(A )) / (2 * a)
return (
root_a.real if not root_a.imag else root_a,
root_a.real if not root_a.imag else root_a,
)
def UpperCAmelCase ( ):
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase = quadratic_roots(a=5 , b=6 , c=1 )
print(f'The solutions are: {solutiona} and {solutiona}' )
if __name__ == "__main__":
main()
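# Quick check (standalone snippet, assuming the module above is importable):
# a zero discriminant collapses both roots, a negative one yields complex roots.
assert quadratic_roots(a=1, b=-2, c=1) == (1.0, 1.0)  # delta == 0 -> repeated real root
assert quadratic_roots(a=1, b=0, c=1) == (1j, -1j)    # delta < 0 -> complex conjugates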
| 24 | 0 |
"""simple docstring"""
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase__ ( A ):
'''simple docstring'''
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(snake_case , 'embed_dim' ) )
self.parent.assertTrue(hasattr(snake_case , 'num_heads' ) )
class lowercase__ :
'''simple docstring'''
def __init__( self , snake_case , snake_case=13 , snake_case=64 , snake_case=3 , snake_case=[16, 48, 96] , snake_case=[1, 3, 6] , snake_case=[1, 2, 10] , snake_case=[7, 3, 3] , snake_case=[4, 2, 2] , snake_case=[2, 1, 1] , snake_case=[2, 2, 2] , snake_case=[False, False, True] , snake_case=[0.0, 0.0, 0.0] , snake_case=0.02 , snake_case=1E-12 , snake_case=True , snake_case=True , snake_case=2 , ) -> Tuple:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_sizes
_UpperCAmelCase = patch_stride
_UpperCAmelCase = patch_padding
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_channels
_UpperCAmelCase = embed_dim
_UpperCAmelCase = num_heads
_UpperCAmelCase = stride_kv
_UpperCAmelCase = depth
_UpperCAmelCase = cls_token
_UpperCAmelCase = attention_drop_rate
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self ) -> List[str]:
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Optional[int]:
_UpperCAmelCase = CvtModel(config=snake_case )
model.to(snake_case )
model.eval()
_UpperCAmelCase = model(snake_case )
_UpperCAmelCase = (self.image_size, self.image_size)
_UpperCAmelCase , _UpperCAmelCase = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
_UpperCAmelCase = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
_UpperCAmelCase = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Optional[Any]:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = CvtForImageClassification(snake_case )
model.to(snake_case )
model.eval()
_UpperCAmelCase = model(snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowercase__ ( A, A, unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
_UpperCAmelCase = (
{'''feature-extraction''': CvtModel, '''image-classification''': CvtForImageClassification}
if is_torch_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = CvtModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=37 )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase_ ( self ) -> Union[str, Any]:
return
@unittest.skip(reason='Cvt does not output attentions' )
def lowerCamelCase_ ( self ) -> str:
pass
@unittest.skip(reason='Cvt does not use inputs_embeds' )
def lowerCamelCase_ ( self ) -> int:
pass
@unittest.skip(reason='Cvt does not support input and output embeddings' )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
pass
def lowerCamelCase_ ( self ) -> Any:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(snake_case )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case )
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def lowerCamelCase_ ( self ) -> Optional[int]:
def check_hidden_states_output(snake_case , snake_case , snake_case ):
_UpperCAmelCase = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(snake_case , snake_case ) )
_UpperCAmelCase = outputs.hidden_states
_UpperCAmelCase = len(self.model_tester.depth )
self.assertEqual(len(snake_case ) , snake_case )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = True
check_hidden_states_output(snake_case , snake_case , snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
check_hidden_states_output(snake_case , snake_case , snake_case )
def lowerCamelCase_ ( self ) -> Any:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCamelCase_ ( self ) -> Dict:
pass
@slow
def lowerCamelCase_ ( self ) -> Dict:
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = CvtModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def UpperCAmelCase ( ):
'''simple docstring'''
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCamelCase_ ( self ) -> List[Any]:
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(snake_case )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=snake_case , return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**snake_case )
# verify the logits
_UpperCAmelCase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , snake_case )
_UpperCAmelCase = torch.tensor([0.9285, 0.9015, -0.3150] ).to(snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1E-4 ) )
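# Illustration (standalone sketch, not part of the test file above): the
# floor(...) expressions in create_and_check_model are the standard strided
# convolution output-size formula, applied once per CvT stage, since every
# stage downsamples through its convolutional patch embedding.
def conv_output_size(size: int, kernel: int, stride: int, padding: int) -> int:
    return (size + 2 * padding - kernel) // stride + 1
size = 64  # the tester's default image_size
for kernel, stride, padding in zip([7, 3, 3], [4, 2, 2], [2, 1, 1]):
    size = conv_output_size(size, kernel, stride, padding)
    print(size)  # 16, 8, 4 -- stage 0 matches the image_size // 4 hidden-state check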
| 709 |
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class lowercase__ ( A, unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = BarthezTokenizer
_UpperCAmelCase = BarthezTokenizerFast
_UpperCAmelCase = True
_UpperCAmelCase = True
def lowerCamelCase_ ( self ) -> Optional[int]:
super().setUp()
_UpperCAmelCase = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=snake_case )
_UpperCAmelCase = tokenizer
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = '<pad>'
_UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case ) , snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case ) , snake_case )
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(snake_case ) , 101122 )
def lowerCamelCase_ ( self ) -> List[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 101122 )
@require_torch
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_UpperCAmelCase = [0, 57, 3018, 70307, 91, 2]
_UpperCAmelCase = self.tokenizer(
snake_case , max_length=len(snake_case ) , padding=snake_case , truncation=snake_case , return_tensors='pt' )
self.assertIsInstance(snake_case , snake_case )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
_UpperCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(snake_case , snake_case )
def lowerCamelCase_ ( self ) -> Optional[Any]:
if not self.test_rust_tokenizer:
return
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = 'I was born in 92000, and this is falsรฉ.'
_UpperCAmelCase = tokenizer.tokenize(snake_case )
_UpperCAmelCase = rust_tokenizer.tokenize(snake_case )
self.assertListEqual(snake_case , snake_case )
_UpperCAmelCase = tokenizer.encode(snake_case , add_special_tokens=snake_case )
_UpperCAmelCase = rust_tokenizer.encode(snake_case , add_special_tokens=snake_case )
self.assertListEqual(snake_case , snake_case )
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = tokenizer.encode(snake_case )
_UpperCAmelCase = rust_tokenizer.encode(snake_case )
self.assertListEqual(snake_case , snake_case )
@slow
def lowerCamelCase_ ( self ) -> Optional[int]:
# fmt: off
_UpperCAmelCase = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
_UpperCAmelCase = [
'Le transformeur est un modรจle d\'apprentissage profond introduit en 2017, '
'utilisรฉ principalement dans le domaine du traitement automatique des langues (TAL).',
'ร l\'instar des rรฉseaux de neurones rรฉcurrents (RNN), les transformeurs sont conรงus '
'pour gรฉrer des donnรฉes sรฉquentielles, telles que le langage naturel, pour des tรขches '
'telles que la traduction et la synthรจse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=snake_case , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=snake_case , )
| 24 | 0 |
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowercase__ :
'''simple docstring'''
_UpperCAmelCase = field(
default=A, metadata={
'''help''': (
'''The model checkpoint for weights initialization. Leave None if you want to train a model from'''
''' scratch.'''
)
}, )
_UpperCAmelCase = field(
default=A, metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(A )}, )
_UpperCAmelCase = field(
default=A, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
_UpperCAmelCase = field(
default=A, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
_UpperCAmelCase = field(
default=A, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''}, )
@dataclass
class lowercase__ :
'''simple docstring'''
_UpperCAmelCase = field(
default=A, metadata={'''help''': '''The input training data file (a text file).'''} )
_UpperCAmelCase = field(
default=A, metadata={
'''help''': (
'''The input training data files (multiple files in glob format). '''
'''Very often splitting large files to smaller files can prevent tokenizer going out of memory'''
)
}, )
_UpperCAmelCase = field(
default=A, metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''}, )
_UpperCAmelCase = field(
default=A, metadata={'''help''': '''An optional input train ref data file for whole word mask in Chinese.'''}, )
_UpperCAmelCase = field(
default=A, metadata={'''help''': '''An optional input eval ref data file for whole word mask in Chinese.'''}, )
_UpperCAmelCase = field(
default=A, metadata={'''help''': '''Whether distinct lines of text in the dataset are to be handled as distinct sequences.'''}, )
_UpperCAmelCase = field(
default=A, metadata={'''help''': '''Train with masked-language modeling loss instead of language modeling.'''} )
_UpperCAmelCase = field(default=A, metadata={'''help''': '''Whether ot not to use whole word mask.'''} )
_UpperCAmelCase = field(
default=0.15, metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
_UpperCAmelCase = field(
default=1 / 6, metadata={
'''help''': (
'''Ratio of length of a span of masked tokens to surrounding context length for permutation language'''
''' modeling.'''
)
}, )
_UpperCAmelCase = field(
default=5, metadata={'''help''': '''Maximum length of a span of masked tokens for permutation language modeling.'''} )
_UpperCAmelCase = field(
default=-1, metadata={
'''help''': (
'''Optional input sequence length after tokenization.'''
'''The training dataset will be truncated in block of this size for training.'''
'''Default to the model max input length for single sentence inputs (take into account special tokens).'''
)
}, )
_UpperCAmelCase = field(
default=A, metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def UpperCAmelCase ( A : DataTrainingArguments , A : PreTrainedTokenizer , A : bool = False , A : Optional[str] = None , ):
'''simple docstring'''
def _dataset(A : List[Any] , A : Union[str, Any]=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError('You need to set whole word masking and mlm to True for Chinese Whole Word Mask' )
return LineByLineWithRefDataset(
tokenizer=A , file_path=A , block_size=args.block_size , ref_path=A , )
return LineByLineTextDataset(tokenizer=A , file_path=A , block_size=args.block_size )
else:
return TextDataset(
tokenizer=A , file_path=A , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=A , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(A ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def UpperCAmelCase ( ):
'''simple docstring'''
_UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '
'or remove the --do_eval argument.' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , A )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
_UpperCAmelCase = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
_UpperCAmelCase = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
_UpperCAmelCase = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.tokenizer_name:
_UpperCAmelCase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
_UpperCAmelCase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'
' script, save it, and load it from here, using --tokenizer_name' )
if model_args.model_name_or_path:
_UpperCAmelCase = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=A , cache_dir=model_args.cache_dir , )
else:
logger.info('Training new model from scratch' )
_UpperCAmelCase = AutoModelWithLMHead.from_config(A )
model.resize_token_embeddings(len(A ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'
'--mlm flag (masked language modeling).' )
if data_args.block_size <= 0:
_UpperCAmelCase = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
_UpperCAmelCase = min(data_args.block_size , tokenizer.max_len )
# Get datasets
_UpperCAmelCase = (
get_dataset(A , tokenizer=A , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
_UpperCAmelCase = (
get_dataset(A , tokenizer=A , evaluate=A , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
_UpperCAmelCase = DataCollatorForPermutationLanguageModeling(
tokenizer=A , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
_UpperCAmelCase = DataCollatorForWholeWordMask(
tokenizer=A , mlm_probability=data_args.mlm_probability )
else:
_UpperCAmelCase = DataCollatorForLanguageModeling(
tokenizer=A , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
_UpperCAmelCase = Trainer(
model=A , args=A , data_collator=A , train_dataset=A , eval_dataset=A , prediction_loss_only=A , )
# Training
if training_args.do_train:
_UpperCAmelCase = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=A )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_UpperCAmelCase = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_UpperCAmelCase = trainer.evaluate()
_UpperCAmelCase = math.exp(eval_output['eval_loss'] )
_UpperCAmelCase = {'perplexity': perplexity}
_UpperCAmelCase = os.path.join(training_args.output_dir , 'eval_results_lm.txt' )
if trainer.is_world_master():
with open(A , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key in sorted(result.keys() ):
logger.info(' %s = %s' , A , str(result[key] ) )
writer.write('%s = %s\n' % (key, str(result[key] )) )
results.update(A )
return results
def _mp_fn(index: int):
    # Entry point for xla_spawn (TPU multiprocessing); the worker index is unused.
    main()
if __name__ == "__main__":
main()
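# Example invocation (a sketch; the script filename and data paths are
# hypothetical, but every flag maps to a dataclass field parsed above):
#
#   python run_language_modeling.py \
#       --model_name_or_path roberta-base \
#       --train_data_file ./data/train.txt \
#       --eval_data_file ./data/valid.txt \
#       --mlm --whole_word_mask --line_by_line \
#       --do_train --do_eval \
#       --output_dir ./lm-finetune --overwrite_output_dir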
| 710 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowercase__ ( A, unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = DiTPipeline
_UpperCAmelCase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
_UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
'''latents''',
'''num_images_per_prompt''',
'''callback''',
'''callback_steps''',
}
_UpperCAmelCase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
_UpperCAmelCase = False
def lowerCamelCase_ ( self ) -> str:
torch.manual_seed(0 )
_UpperCAmelCase = Transformer2DModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=snake_case , activation_fn='gelu-approximate' , num_embeds_ada_norm=1000 , norm_type='ada_norm_zero' , norm_elementwise_affine=snake_case , )
_UpperCAmelCase = AutoencoderKL()
_UpperCAmelCase = DDIMScheduler()
_UpperCAmelCase = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
return components
def lowerCamelCase_ ( self , snake_case , snake_case=0 ) -> Optional[Any]:
if str(snake_case ).startswith('mps' ):
_UpperCAmelCase = torch.manual_seed(snake_case )
else:
_UpperCAmelCase = torch.Generator(device=snake_case ).manual_seed(snake_case )
_UpperCAmelCase = {
'class_labels': [1],
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def lowerCamelCase_ ( self ) -> List[Any]:
_UpperCAmelCase = 'cpu'
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = self.pipeline_class(**snake_case )
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
_UpperCAmelCase = self.get_dummy_inputs(snake_case )
_UpperCAmelCase = pipe(**snake_case ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_UpperCAmelCase = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
_UpperCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(snake_case , 1E-3 )
def lowerCamelCase_ ( self ) -> Any:
self._test_inference_batch_single_identical(relax_max_difference=snake_case , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def lowerCamelCase_ ( self ) -> Optional[int]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase_ ( self ) -> int:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
pipe.to('cuda' )
_UpperCAmelCase = ['vase', 'umbrella', 'white shark', 'white wolf']
_UpperCAmelCase = pipe.get_label_ids(snake_case )
_UpperCAmelCase = pipe(snake_case , generator=snake_case , num_inference_steps=40 , output_type='np' ).images
for word, image in zip(snake_case , snake_case ):
_UpperCAmelCase = load_numpy(
f'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy' )
assert np.abs((expected_image - image).max() ) < 1E-2
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
_UpperCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('cuda' )
_UpperCAmelCase = ['vase', 'umbrella']
_UpperCAmelCase = pipe.get_label_ids(snake_case )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = pipe(snake_case , generator=snake_case , num_inference_steps=25 , output_type='np' ).images
for word, image in zip(snake_case , snake_case ):
_UpperCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
f'/dit/{word}_512.npy' )
assert np.abs((expected_image - image).max() ) < 1E-1
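# Usage sketch (illustrative, mirroring the slow tests above): DiT is
# class-conditional, so the "prompts" are ImageNet class names that
# get_label_ids maps to integer label ids.
from diffusers import DiTPipeline
pipe = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256').to('cuda')
label_ids = pipe.get_label_ids(['white shark', 'umbrella'])
images = pipe(class_labels=label_ids, num_inference_steps=25).images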
| 24 | 0 |
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)
@add_end_docstrings(A )
class lowercase__ ( A ):
'''simple docstring'''
def __init__( self , **snake_case ) -> List[Any]:
super().__init__(**snake_case )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self , snake_case , **snake_case ) -> Tuple:
return super().__call__(snake_case , **snake_case )
def lowerCamelCase_ ( self , **snake_case ) -> str:
_UpperCAmelCase = {}
if "candidate_labels" in kwargs:
_UpperCAmelCase = kwargs['candidate_labels']
if "hypothesis_template" in kwargs:
_UpperCAmelCase = kwargs['hypothesis_template']
return preprocess_params, {}, {}
def lowerCamelCase_ ( self , snake_case , snake_case=None , snake_case="This is a photo of {}." ) -> int:
_UpperCAmelCase = load_image(snake_case )
_UpperCAmelCase = self.image_processor(images=[image] , return_tensors=self.framework )
_UpperCAmelCase = candidate_labels
_UpperCAmelCase = [hypothesis_template.format(x ) for x in candidate_labels]
_UpperCAmelCase = self.tokenizer(snake_case , return_tensors=self.framework , padding=snake_case )
_UpperCAmelCase = [text_inputs]
return inputs
def lowerCamelCase_ ( self , snake_case ) -> Optional[int]:
_UpperCAmelCase = model_inputs.pop('candidate_labels' )
_UpperCAmelCase = model_inputs.pop('text_inputs' )
if isinstance(text_inputs[0] , snake_case ):
_UpperCAmelCase = text_inputs[0]
else:
# Batching case.
_UpperCAmelCase = text_inputs[0][0]
_UpperCAmelCase = self.model(**snake_case , **snake_case )
_UpperCAmelCase = {
'candidate_labels': candidate_labels,
'logits': outputs.logits_per_image,
}
return model_outputs
def lowerCamelCase_ ( self , snake_case ) -> Optional[int]:
_UpperCAmelCase = model_outputs.pop('candidate_labels' )
_UpperCAmelCase = model_outputs['logits'][0]
if self.framework == "pt":
_UpperCAmelCase = logits.softmax(dim=-1 ).squeeze(-1 )
_UpperCAmelCase = probs.tolist()
if not isinstance(snake_case , snake_case ):
_UpperCAmelCase = [scores]
elif self.framework == "tf":
_UpperCAmelCase = stable_softmax(snake_case , axis=-1 )
_UpperCAmelCase = probs.numpy().tolist()
else:
raise ValueError(f'Unsupported framework: {self.framework}' )
_UpperCAmelCase = [
{'score': score, 'label': candidate_label}
for score, candidate_label in sorted(zip(snake_case , snake_case ) , key=lambda x : -x[0] )
]
return result
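# Usage sketch (the checkpoint name is illustrative; the task name and call
# kwargs match the preprocess parameters handled above):
from transformers import pipeline
classifier = pipeline('zero-shot-image-classification', model='openai/clip-vit-base-patch32')
preds = classifier(
    'http://images.cocodataset.org/val2017/000000039769.jpg',
    candidate_labels=['two cats', 'a dog', 'an airplane'],
    hypothesis_template='A photo of {}.',
)
print(preds[0]['label'], preds[0]['score'])  # best label first, scores sum to 1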
| 711 |
"""simple docstring"""
def UpperCAmelCase ( A : int ):
'''simple docstring'''
_UpperCAmelCase = abs(A )
_UpperCAmelCase = 0
while n > 0:
res += n % 10
n //= 10
return res
def UpperCAmelCase ( A : int ):
'''simple docstring'''
_UpperCAmelCase = abs(A )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def UpperCAmelCase ( A : int ):
'''simple docstring'''
return sum(int(A ) for c in str(abs(A ) ) )
def UpperCAmelCase ( ):
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(A : Callable , A : int ) -> None:
_UpperCAmelCase = f'{func.__name__}({value})'
_UpperCAmelCase = timeit(f'__main__.{call}' , setup='import __main__' )
print(f'{call:56} = {func(A )} -- {timing:.4f} seconds' )
for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(A , A )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
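# Quick cross-check (standalone snippet): all three variants agree,
# including on zero and on negative inputs.
for n in (0, 7, -123, 10**18 + 1):
    assert sum_of_digits(n) == sum_of_digits_recursion(n) == sum_of_digits_compact(n)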
| 24 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , snake_case , snake_case=7 , snake_case=3 , snake_case=18 , snake_case=30 , snake_case=400 , snake_case=True , snake_case=None , snake_case=True , snake_case=None , snake_case=True , ) -> Dict:
_UpperCAmelCase = size if size is not None else {'shortest_edge': 20}
_UpperCAmelCase = crop_size if crop_size is not None else {'height': 18, 'width': 18}
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = image_size
_UpperCAmelCase = min_resolution
_UpperCAmelCase = max_resolution
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = do_flip_channel_order
def lowerCamelCase_ ( self ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class lowercase__ ( A, unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = MobileViTImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self ) -> List[Any]:
_UpperCAmelCase = MobileViTImageProcessingTester(self )
@property
def lowerCamelCase_ ( self ) -> Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case , 'do_resize' ) )
self.assertTrue(hasattr(snake_case , 'size' ) )
self.assertTrue(hasattr(snake_case , 'do_center_crop' ) )
self.assertTrue(hasattr(snake_case , 'center_crop' ) )
self.assertTrue(hasattr(snake_case , 'do_flip_channel_order' ) )
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 20} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
pass
def lowerCamelCase_ ( self ) -> Tuple:
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , Image.Image )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowerCamelCase_ ( self ) -> List[str]:
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case , numpify=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , np.ndarray )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowerCamelCase_ ( self ) -> Dict:
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case , torchify=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , torch.Tensor )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 712 |
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase ( A : int , A : int ):
'''simple docstring'''
_UpperCAmelCase = []
create_all_state(1 , A , A , [] , A )
return result
def UpperCAmelCase ( A : int , A : int , A : int , A : list[int] , A : list[list[int]] , ):
'''simple docstring'''
if level == 0:
total_list.append(current_list[:] )
return
for i in range(A , total_number - level + 2 ):
current_list.append(A )
create_all_state(i + 1 , A , level - 1 , A , A )
current_list.pop()
def UpperCAmelCase ( A : list[list[int]] ):
'''simple docstring'''
for i in total_list:
print(*A )
if __name__ == "__main__":
lowercase = 4
lowercase = 2
lowercase = generate_all_combinations(n, k)
print_all_state(total_list)
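# Cross-check (standalone snippet): the backtracking result matches
# itertools.combinations over range(1, n + 1), in the same lexicographic order.
from itertools import combinations
assert generate_all_combinations(4, 2) == [list(c) for c in combinations(range(1, 5), 2)]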
| 24 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , snake_case , snake_case=7 , snake_case=3 , snake_case=18 , snake_case=30 , snake_case=400 , snake_case=True , snake_case=None , snake_case=True , snake_case=False , snake_case=True , snake_case=True , snake_case=[0.5, 0.5, 0.5] , snake_case=[0.5, 0.5, 0.5] , ) -> List[str]:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = image_size
_UpperCAmelCase = min_resolution
_UpperCAmelCase = max_resolution
_UpperCAmelCase = do_resize
_UpperCAmelCase = size if size is not None else {'height': 18, 'width': 20}
_UpperCAmelCase = do_thumbnail
_UpperCAmelCase = do_align_axis
_UpperCAmelCase = do_pad
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean
_UpperCAmelCase = image_std
def lowerCamelCase_ ( self ) -> int:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class lowercase__ ( A, unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = DonutImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self ) -> List[Any]:
_UpperCAmelCase = DonutImageProcessingTester(self )
@property
def lowerCamelCase_ ( self ) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case , 'do_resize' ) )
self.assertTrue(hasattr(snake_case , 'size' ) )
self.assertTrue(hasattr(snake_case , 'do_thumbnail' ) )
self.assertTrue(hasattr(snake_case , 'do_align_long_axis' ) )
self.assertTrue(hasattr(snake_case , 'do_pad' ) )
self.assertTrue(hasattr(snake_case , 'do_normalize' ) )
self.assertTrue(hasattr(snake_case , 'image_mean' ) )
self.assertTrue(hasattr(snake_case , 'image_std' ) )
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 20} )
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
# Previous config had dimensions in (width, height) order
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'height': 84, 'width': 42} )
def lowerCamelCase_ ( self ) -> Any:
pass
@is_flaky()
def lowerCamelCase_ ( self ) -> Optional[Any]:
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , Image.Image )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def lowerCamelCase_ ( self ) -> List[Any]:
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case , numpify=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , np.ndarray )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def lowerCamelCase_ ( self ) -> Dict:
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case , torchify=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , torch.Tensor )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
| 713 |
"""simple docstring"""
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def _dump_articles(path: Path, articles: list):
    '''Write the given articles to `path` as newline-joined text.'''
    content = '\n'.join(articles)
    Path(path).open('w').writelines(content)
T5_TINY = '''patrickvonplaten/t5-tiny-random'''
BART_TINY = '''sshleifer/bart-tiny-random'''
MBART_TINY = '''sshleifer/tiny-mbart'''
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class lowercase__ ( A ):
'''simple docstring'''
def lowerCamelCase_ ( self , snake_case ) -> str:
_UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
_UpperCAmelCase = input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
_UpperCAmelCase = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.']
_dump_articles(snake_case , snake_case )
_UpperCAmelCase = str(Path(self.get_auto_remove_tmp_dir() ) / 'scores.json' )
_UpperCAmelCase = 'translation_en_to_de' if model == T5_TINY else 'summarization'
_UpperCAmelCase = f'\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n '.split()
with patch.object(snake_case , 'argv' , snake_case ):
run_generate()
assert Path(snake_case ).exists()
# os.remove(Path(output_file_name))
def lowerCamelCase_ ( self ) -> str:
self.run_eval_tester(snake_case )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def lowerCamelCase_ ( self , snake_case ) -> List[Any]:
self.run_eval_tester(snake_case )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def lowerCamelCase_ ( self , snake_case ) -> Dict:
_UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
_UpperCAmelCase = input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
_UpperCAmelCase = {
'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'],
'de': [
'Maschinelles Lernen ist groรartig, oder?',
'Ich esse gerne Bananen',
'Morgen ist wieder ein toller Tag!',
],
}
_UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() )
_UpperCAmelCase = str(tmp_dir / 'scores.json' )
_UpperCAmelCase = str(tmp_dir / 'val.target' )
_dump_articles(snake_case , text['en'] )
_dump_articles(snake_case , text['de'] )
_UpperCAmelCase = 'translation_en_to_de' if model == T5_TINY else 'summarization'
_UpperCAmelCase = f'\n run_eval_search.py\n {model}\n {str(snake_case )}\n {str(snake_case )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n '.split()
testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'] )
with patch.object(snake_case , 'argv' , snake_case ):
with CaptureStdout() as cs:
run_search()
_UpperCAmelCase = [' num_beams | length_penalty', model, 'Best score args']
_UpperCAmelCase = ['Info']
if "translation" in task:
expected_strings.append('bleu' )
else:
expected_strings.extend(snake_case )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(snake_case ).exists()
os.remove(Path(snake_case ) )
| 24 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class lowercase__ ( A ):
'''simple docstring'''
_UpperCAmelCase = ['''input_features''', '''attention_mask''']
def __init__( self , snake_case=80 , snake_case=16000 , snake_case=80 , snake_case=0.0 , snake_case=True , snake_case=True , snake_case=True , **snake_case , ) -> Tuple:
super().__init__(feature_size=snake_case , sampling_rate=snake_case , padding_value=snake_case , **snake_case )
_UpperCAmelCase = num_mel_bins
_UpperCAmelCase = do_ceptral_normalize
_UpperCAmelCase = normalize_means
_UpperCAmelCase = normalize_vars
_UpperCAmelCase = True
def lowerCamelCase_ ( self , snake_case , ) -> np.ndarray:
_UpperCAmelCase = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
_UpperCAmelCase = torch.from_numpy(snake_case ).unsqueeze(0 )
_UpperCAmelCase = ta_kaldi.fbank(snake_case , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def lowerCamelCase_ ( snake_case , snake_case , snake_case = True , snake_case = True , snake_case = 0.0 , ) -> np.ndarray:
# make sure we normalize float32 arrays
if normalize_means:
_UpperCAmelCase = x[:input_length].mean(axis=0 )
_UpperCAmelCase = np.subtract(snake_case , snake_case )
if normalize_vars:
_UpperCAmelCase = x[:input_length].std(axis=0 )
_UpperCAmelCase = np.divide(snake_case , snake_case )
if input_length < x.shape[0]:
_UpperCAmelCase = padding_value
# make sure array is in float32
_UpperCAmelCase = x.astype(np.float32 )
return x
def lowerCamelCase_ ( self , snake_case , snake_case = None ) -> List[np.ndarray]:
_UpperCAmelCase = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(snake_case , snake_case , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(snake_case , snake_case )
]
def __call__( self , snake_case , snake_case = False , snake_case = None , snake_case = False , snake_case = None , snake_case = None , snake_case = None , snake_case = None , **snake_case , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
f' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'
f' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
_UpperCAmelCase = isinstance(snake_case , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
_UpperCAmelCase = is_batched_numpy or (
isinstance(snake_case , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_UpperCAmelCase = [np.asarray(snake_case , dtype=np.float32 ) for speech in raw_speech]
elif not is_batched and not isinstance(snake_case , np.ndarray ):
_UpperCAmelCase = np.asarray(snake_case , dtype=np.float32 )
elif isinstance(snake_case , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
_UpperCAmelCase = raw_speech.astype(np.float32 )
# always return batch
if not is_batched:
_UpperCAmelCase = [raw_speech]
# extract fbank features
_UpperCAmelCase = [self._extract_fbank_features(snake_case ) for waveform in raw_speech]
# convert into correct format for padding
_UpperCAmelCase = BatchFeature({'input_features': features} )
_UpperCAmelCase = self.pad(
snake_case , padding=snake_case , max_length=snake_case , truncation=snake_case , pad_to_multiple_of=snake_case , return_attention_mask=snake_case , **snake_case , )
# make sure list is in array format
_UpperCAmelCase = padded_inputs.get('input_features' )
if isinstance(input_features[0] , snake_case ):
_UpperCAmelCase = [np.asarray(snake_case , dtype=np.float32 ) for feature in input_features]
_UpperCAmelCase = padded_inputs.get('attention_mask' )
if attention_mask is not None:
_UpperCAmelCase = [np.asarray(snake_case , dtype=np.int32 ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
_UpperCAmelCase = (
np.array(snake_case , dtype=np.int32 )
if self._get_padding_strategies(snake_case , max_length=snake_case ) is not PaddingStrategy.DO_NOT_PAD
else None
)
_UpperCAmelCase = self.normalize(
padded_inputs['input_features'] , attention_mask=snake_case )
if return_tensors is not None:
_UpperCAmelCase = padded_inputs.convert_to_tensors(snake_case )
return padded_inputs
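# Usage sketch (assumption: the class above mirrors transformers'
# Speech2TextFeatureExtractor; torchaudio must be installed for the
# Kaldi-style fbank computation):
import numpy as np
from transformers import Speech2TextFeatureExtractor
extractor = Speech2TextFeatureExtractor(feature_size=80, num_mel_bins=80, sampling_rate=16000)
waveform = np.random.randn(16000).astype(np.float32)  # 1 s of mono audio at 16 kHz
batch = extractor(waveform, sampling_rate=16000, padding=True, return_tensors='np')
print(batch['input_features'].shape)  # (1, num_frames, 80)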
| 714 |
"""simple docstring"""
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)
DatasetType = TypeVar('''DatasetType''', Dataset, IterableDataset)
def UpperCAmelCase ( A : List[DatasetType] , A : Optional[List[float]] = None , A : Optional[int] = None , A : Optional[DatasetInfo] = None , A : Optional[NamedSplit] = None , A : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ):
'''simple docstring'''
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('Unable to interleave an empty list of datasets.' )
for i, dataset in enumerate(A ):
if not isinstance(A , (Dataset, IterableDataset) ):
if isinstance(A , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
'is an empty dataset dictionary.' )
raise ValueError(
f'Dataset at position {i} has at least one split: {list(A )}\n'
f'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(A ) )}\']' )
raise ValueError(
f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.' )
if i == 0:
_UpperCAmelCase , _UpperCAmelCase = (
(Dataset, IterableDataset) if isinstance(A , A ) else (IterableDataset, Dataset)
)
elif not isinstance(A , A ):
raise ValueError(
f'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(f'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' )
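    # Dispatch to the map-style or iterable implementation based on the (homogeneous) dataset type established above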
if dataset_type is Dataset:
return _interleave_map_style_datasets(
A , A , A , info=A , split=A , stopping_strategy=A )
else:
return _interleave_iterable_datasets(
A , A , A , info=A , split=A , stopping_strategy=A )
def UpperCAmelCase ( A : List[DatasetType] , A : Optional[DatasetInfo] = None , A : Optional[NamedSplit] = None , A : int = 0 , ):
'''simple docstring'''
if not dsets:
raise ValueError('Unable to concatenate an empty list of datasets.' )
for i, dataset in enumerate(A ):
if not isinstance(A , (Dataset, IterableDataset) ):
if isinstance(A , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
'is an empty dataset dictionary.' )
raise ValueError(
f'Dataset at position {i} has at least one split: {list(A )}\n'
                f'Please pick one to concatenate with the other datasets, for example: dataset[\'{next(iter(A ) )}\']' )
raise ValueError(
f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.' )
if i == 0:
_UpperCAmelCase , _UpperCAmelCase = (
(Dataset, IterableDataset) if isinstance(A , A ) else (IterableDataset, Dataset)
)
elif not isinstance(A , A ):
raise ValueError(
                f'Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(A , info=A , split=A , axis=A )
else:
return _concatenate_iterable_datasets(A , info=A , split=A , axis=A )
| 24 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class lowercase__ ( A ):
'''simple docstring'''
_UpperCAmelCase = '''gpt_neox'''
def __init__( self , snake_case=50432 , snake_case=6144 , snake_case=44 , snake_case=64 , snake_case=24576 , snake_case="gelu" , snake_case=0.25 , snake_case=10000 , snake_case=0.0 , snake_case=0.0 , snake_case=0.1 , snake_case=2048 , snake_case=0.02 , snake_case=1E-5 , snake_case=True , snake_case=0 , snake_case=2 , snake_case=False , snake_case=True , snake_case=None , **snake_case , ) -> Union[str, Any]:
super().__init__(bos_token_id=snake_case , eos_token_id=snake_case , **snake_case )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = rotary_pct
_UpperCAmelCase = rotary_emb_base
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = hidden_dropout
_UpperCAmelCase = classifier_dropout
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = use_cache
_UpperCAmelCase = tie_word_embeddings
_UpperCAmelCase = use_parallel_residual
_UpperCAmelCase = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
                'The hidden size is not divisible by the number of attention heads! Make sure to update them!' )
def lowerCamelCase_ ( self ) -> Dict:
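        # Validate the optional rope_scaling dict: expects {'type': 'linear' | 'dynamic', 'factor': float > 1}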
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , snake_case ) or len(self.rope_scaling ) != 2:
raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
f'got {self.rope_scaling}' )
_UpperCAmelCase = self.rope_scaling.get('type' , snake_case )
_UpperCAmelCase = self.rope_scaling.get('factor' , snake_case )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
                f'`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
if rope_scaling_factor is None or not isinstance(snake_case , snake_case ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}' )
| 715 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_UpperCAmelCase = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Dict:
_UpperCAmelCase = TextaTextGenerationPipeline(model=snake_case , tokenizer=snake_case )
return generator, ["Something to write", "Something else"]
def lowerCamelCase_ ( self , snake_case , snake_case ) -> Dict:
_UpperCAmelCase = generator('Something there' )
self.assertEqual(snake_case , [{'generated_text': ANY(snake_case )}] )
        # These are encoder-decoder models; they don't just append to the incoming string
self.assertFalse(outputs[0]['generated_text'].startswith('Something there' ) )
_UpperCAmelCase = generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=snake_case )
self.assertEqual(
snake_case , [
[{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}],
[{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}],
] , )
_UpperCAmelCase = generator(
['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=snake_case )
self.assertEqual(
snake_case , [
[{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}],
[{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}],
] , )
with self.assertRaises(snake_case ):
generator(4 )
@require_torch
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='pt' )
# do_sample=False necessary for reproducibility
_UpperCAmelCase = generator('Something there' , do_sample=snake_case )
self.assertEqual(snake_case , [{'generated_text': ''}] )
_UpperCAmelCase = 3
_UpperCAmelCase = generator(
'Something there' , num_return_sequences=snake_case , num_beams=snake_case , )
_UpperCAmelCase = [
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': ''},
]
self.assertEqual(snake_case , snake_case )
_UpperCAmelCase = generator('This is a test' , do_sample=snake_case , num_return_sequences=2 , return_tensors=snake_case )
self.assertEqual(
snake_case , [
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
] , )
_UpperCAmelCase = generator.model.config.eos_token_id
_UpperCAmelCase = '<pad>'
_UpperCAmelCase = generator(
['This is a test', 'This is a second test'] , do_sample=snake_case , num_return_sequences=2 , batch_size=2 , return_tensors=snake_case , )
self.assertEqual(
snake_case , [
[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
],
[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
],
] , )
@require_tf
def lowerCamelCase_ ( self ) -> Any:
_UpperCAmelCase = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='tf' )
# do_sample=False necessary for reproducibility
_UpperCAmelCase = generator('Something there' , do_sample=snake_case )
self.assertEqual(snake_case , [{'generated_text': ''}] )
| 24 | 0 |
"""simple docstring"""
import math
import os
import sys
def UpperCAmelCase ( A : str ):
'''simple docstring'''
_UpperCAmelCase = ''
try:
with open(A , 'rb' ) as binary_file:
_UpperCAmelCase = binary_file.read()
for dat in data:
_UpperCAmelCase = f'{dat:08b}'
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
def UpperCAmelCase ( A : dict[str, str] , A : str , A : int , A : str ):
'''simple docstring'''
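    # Retire the matched key; whenever the code index reaches a power of two, every stored code is widened with a leading '0'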
lexicon.pop(A )
_UpperCAmelCase = last_match_id
if math.loga(A ).is_integer():
for curr_key in lexicon:
_UpperCAmelCase = '0' + lexicon[curr_key]
_UpperCAmelCase = bin(A )[2:]
def UpperCAmelCase ( A : str ):
'''simple docstring'''
_UpperCAmelCase = {'0': '0', '1': '1'}
_UpperCAmelCase , _UpperCAmelCase = '', ''
_UpperCAmelCase = len(A )
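    # Consume input bits until the current string matches a lexicon key, emit its code, then refine the lexicon around the match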
for i in range(len(A ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
_UpperCAmelCase = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(A , A , A , A )
index += 1
_UpperCAmelCase = ''
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
_UpperCAmelCase = lexicon[curr_string]
result += last_match_id
return result
def UpperCAmelCase ( A : str , A : str ):
'''simple docstring'''
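    # Header layout: (n - 1) zero bits, the n-bit original file length, then the compressed payload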
_UpperCAmelCase = os.path.getsize(A )
_UpperCAmelCase = bin(A )[2:]
_UpperCAmelCase = len(A )
return "0" * (length_length - 1) + file_length_binary + compressed
def UpperCAmelCase ( A : str , A : str ):
'''simple docstring'''
_UpperCAmelCase = 8
try:
with open(A , 'wb' ) as opened_file:
_UpperCAmelCase = [
to_write[i : i + byte_length]
for i in range(0 , len(A ) , A )
]
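            # Pad the final byte with a '1' marker bit followed by zeros so the original bit length can be recovered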
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(A , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def UpperCAmelCase ( A : str , A : str ):
'''simple docstring'''
_UpperCAmelCase = read_file_binary(A )
_UpperCAmelCase = compress_data(A )
_UpperCAmelCase = add_file_length(A , A )
write_file_binary(A , A )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 716 |
"""simple docstring"""
def UpperCAmelCase ( A : int ):
'''simple docstring'''
_UpperCAmelCase = [[0 for _ in range(A )] for _ in range(m + 1 )]
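    # Bottom-up DP over (target value, largest allowed part); each entry accumulates the two standard partition transitions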
for i in range(m + 1 ):
_UpperCAmelCase = 1
for n in range(m + 1 ):
for k in range(1 , A ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
lowercase = int(input('''Enter a number: ''').strip())
print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
lowercase = int(sys.argv[1])
print(partition(n))
except ValueError:
print('''Please pass a number.''')
| 24 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowercase = logging.get_logger(__name__)
lowercase = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class lowercase__ ( A, A ):
'''simple docstring'''
_UpperCAmelCase = '''swin'''
_UpperCAmelCase = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , snake_case=224 , snake_case=4 , snake_case=3 , snake_case=96 , snake_case=[2, 2, 6, 2] , snake_case=[3, 6, 12, 24] , snake_case=7 , snake_case=4.0 , snake_case=True , snake_case=0.0 , snake_case=0.0 , snake_case=0.1 , snake_case="gelu" , snake_case=False , snake_case=0.02 , snake_case=1E-5 , snake_case=32 , snake_case=None , snake_case=None , **snake_case , ) -> List[Any]:
super().__init__(**snake_case )
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = embed_dim
_UpperCAmelCase = depths
_UpperCAmelCase = len(snake_case )
_UpperCAmelCase = num_heads
_UpperCAmelCase = window_size
_UpperCAmelCase = mlp_ratio
_UpperCAmelCase = qkv_bias
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = hidden_act
_UpperCAmelCase = use_absolute_embeddings
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = initializer_range
_UpperCAmelCase = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_UpperCAmelCase = int(embed_dim * 2 ** (len(snake_case ) - 1) )
_UpperCAmelCase = ['stem'] + [f'stage{idx}' for idx in range(1 , len(snake_case ) + 1 )]
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(
out_features=snake_case , out_indices=snake_case , stage_names=self.stage_names )
class lowercase__ ( A ):
'''simple docstring'''
_UpperCAmelCase = version.parse('''1.11''' )
@property
def lowerCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def lowerCamelCase_ ( self ) -> float:
return 1E-4
| 717 |
"""simple docstring"""
import os
lowercase = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 1_00, '''D''': 5_00, '''M''': 10_00}
def UpperCAmelCase ( A : str ):
'''simple docstring'''
_UpperCAmelCase = 0
_UpperCAmelCase = 0
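    # Subtractive notation: a symbol with a smaller value than its successor is subtracted (IV = 4, CM = 900)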
while index < len(A ) - 1:
_UpperCAmelCase = SYMBOLS[numerals[index]]
_UpperCAmelCase = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def UpperCAmelCase ( A : int ):
'''simple docstring'''
_UpperCAmelCase = ''
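    # Emit thousands first, then hundreds, tens and units, handling the 9- and 4- subtractive cases explicitly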
_UpperCAmelCase = num // 1000
numerals += m_count * "M"
num %= 1000
_UpperCAmelCase = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
_UpperCAmelCase = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def UpperCAmelCase ( A : str = "/p089_roman.txt" ):
'''simple docstring'''
_UpperCAmelCase = 0
with open(os.path.dirname(A ) + roman_numerals_filename ) as filea:
_UpperCAmelCase = filea.readlines()
for line in lines:
_UpperCAmelCase = line.strip()
_UpperCAmelCase = parse_roman_numerals(A )
_UpperCAmelCase = generate_roman_numerals(A )
savings += len(A ) - len(A )
return savings
if __name__ == "__main__":
print(F'''{solution() = }''')
| 24 | 0 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class lowercase__ :
'''simple docstring'''
def __init__( self , snake_case , snake_case=13 , snake_case=7 , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=99 , snake_case=32 , snake_case=2 , snake_case=4 , snake_case=37 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=512 , snake_case=16 , snake_case=2 , snake_case=0.02 , snake_case=3 , snake_case=4 , snake_case=None , ) -> Dict:
_UpperCAmelCase = parent
_UpperCAmelCase = 13
_UpperCAmelCase = 7
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = 99
_UpperCAmelCase = 384
_UpperCAmelCase = 2
_UpperCAmelCase = 4
_UpperCAmelCase = 37
_UpperCAmelCase = 'gelu'
_UpperCAmelCase = 0.1
_UpperCAmelCase = 0.1
_UpperCAmelCase = 512
_UpperCAmelCase = 16
_UpperCAmelCase = 2
_UpperCAmelCase = 0.02
_UpperCAmelCase = 3
_UpperCAmelCase = 4
_UpperCAmelCase = 128
_UpperCAmelCase = 2
_UpperCAmelCase = 9
_UpperCAmelCase = 1
_UpperCAmelCase = None
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=snake_case , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> Any:
_UpperCAmelCase = TFConvBertModel(config=snake_case )
_UpperCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_UpperCAmelCase = [input_ids, input_mask]
_UpperCAmelCase = model(snake_case )
_UpperCAmelCase = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> List[str]:
_UpperCAmelCase = TFConvBertForMaskedLM(config=snake_case )
_UpperCAmelCase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_UpperCAmelCase = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> str:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFConvBertForSequenceClassification(config=snake_case )
_UpperCAmelCase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_UpperCAmelCase = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> int:
_UpperCAmelCase = self.num_choices
_UpperCAmelCase = TFConvBertForMultipleChoice(config=snake_case )
_UpperCAmelCase = tf.tile(tf.expand_dims(snake_case , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = tf.tile(tf.expand_dims(snake_case , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = tf.tile(tf.expand_dims(snake_case , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
_UpperCAmelCase = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> List[Any]:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFConvBertForTokenClassification(config=snake_case )
_UpperCAmelCase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_UpperCAmelCase = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> Optional[int]:
_UpperCAmelCase = TFConvBertForQuestionAnswering(config=snake_case )
_UpperCAmelCase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_UpperCAmelCase = model(snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = self.prepare_config_and_inputs()
        _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class lowercase__ ( A, A, unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_UpperCAmelCase = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = TFConvBertModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=snake_case , hidden_size=37 )
def lowerCamelCase_ ( self ) -> int:
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self ) -> List[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case )
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case )
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case )
def lowerCamelCase_ ( self ) -> Optional[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case )
@slow
def lowerCamelCase_ ( self ) -> Tuple:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = True
_UpperCAmelCase = True
if hasattr(snake_case , 'use_cache' ):
_UpperCAmelCase = True
_UpperCAmelCase = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
_UpperCAmelCase = getattr(self.model_tester , 'key_length' , snake_case )
for model_class in self.all_model_classes:
_UpperCAmelCase = self._prepare_for_class(snake_case , snake_case )
_UpperCAmelCase = model_class(snake_case )
_UpperCAmelCase = len(model(snake_case ) )
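            # Round-trip through a TF SavedModel and verify hidden states and attentions are preserved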
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case , saved_model=snake_case )
_UpperCAmelCase = os.path.join(snake_case , 'saved_model' , '1' )
_UpperCAmelCase = tf.keras.models.load_model(snake_case )
_UpperCAmelCase = model(snake_case )
if self.is_encoder_decoder:
_UpperCAmelCase = outputs['encoder_hidden_states']
_UpperCAmelCase = outputs['encoder_attentions']
else:
_UpperCAmelCase = outputs['hidden_states']
_UpperCAmelCase = outputs['attentions']
self.assertEqual(len(snake_case ) , snake_case )
_UpperCAmelCase = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(snake_case ) , snake_case )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def lowerCamelCase_ ( self ) -> Optional[Any]:
_UpperCAmelCase = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(snake_case )
def lowerCamelCase_ ( self ) -> Tuple:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = True
_UpperCAmelCase = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
_UpperCAmelCase = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
_UpperCAmelCase = getattr(self.model_tester , 'key_length' , snake_case )
_UpperCAmelCase = getattr(self.model_tester , 'key_length' , snake_case )
def check_decoder_attentions_output(snake_case ):
_UpperCAmelCase = len(snake_case )
self.assertEqual(out_len % 2 , 0 )
_UpperCAmelCase = outputs.decoder_attentions
self.assertEqual(len(snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(snake_case ):
_UpperCAmelCase = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
_UpperCAmelCase = True
_UpperCAmelCase = False
_UpperCAmelCase = model_class(snake_case )
_UpperCAmelCase = model(self._prepare_for_class(snake_case , snake_case ) )
_UpperCAmelCase = len(snake_case )
self.assertEqual(config.output_hidden_states , snake_case )
check_encoder_attentions_output(snake_case )
if self.is_encoder_decoder:
_UpperCAmelCase = model_class(snake_case )
_UpperCAmelCase = model(self._prepare_for_class(snake_case , snake_case ) )
self.assertEqual(config.output_hidden_states , snake_case )
check_decoder_attentions_output(snake_case )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_UpperCAmelCase = True
_UpperCAmelCase = model_class(snake_case )
_UpperCAmelCase = model(self._prepare_for_class(snake_case , snake_case ) )
self.assertEqual(config.output_hidden_states , snake_case )
check_encoder_attentions_output(snake_case )
            # Check that attentions are always returned last and that the output ordering is unchanged
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = model_class(snake_case )
_UpperCAmelCase = model(self._prepare_for_class(snake_case , snake_case ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(snake_case ) )
self.assertEqual(model.config.output_hidden_states , snake_case )
check_encoder_attentions_output(snake_case )
@require_tf
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
_UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
_UpperCAmelCase = model(snake_case )[0]
_UpperCAmelCase = [1, 6, 768]
self.assertEqual(output.shape , snake_case )
_UpperCAmelCase = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , snake_case , atol=1E-4 )
| 718 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
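    # Verify the framework-agnostic tensor helpers match NumPy semantics across NumPy, PyTorch, TensorFlow and JAX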
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = {
'task_specific_params': {
'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4},
'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4},
'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6},
}
}
_UpperCAmelCase = {
'task_specific_params.summarization.length_penalty': 1.0,
'task_specific_params.summarization.max_length': 128,
'task_specific_params.summarization.min_length': 12,
'task_specific_params.summarization.num_beams': 4,
'task_specific_params.summarization_cnn.length_penalty': 2.0,
'task_specific_params.summarization_cnn.max_length': 142,
'task_specific_params.summarization_cnn.min_length': 56,
'task_specific_params.summarization_cnn.num_beams': 4,
'task_specific_params.summarization_xsum.length_penalty': 1.0,
'task_specific_params.summarization_xsum.max_length': 62,
'task_specific_params.summarization_xsum.min_length': 11,
'task_specific_params.summarization_xsum.num_beams': 6,
}
self.assertEqual(flatten_dict(snake_case ) , snake_case )
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(snake_case ) , x.transpose() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(transpose(snake_case ) , transpose(snake_case ).numpy() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , transpose(snake_case , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(transpose(snake_case ) , transpose(snake_case ).numpy() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , transpose(snake_case , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(transpose(snake_case ) , np.asarray(transpose(snake_case ) ) ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , np.asarray(transpose(snake_case , axes=(1, 2, 0) ) ) ) )
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , np.reshape(snake_case , (4, 3) ) ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , np.reshape(snake_case , (12, 5) ) ) )
@require_torch
def lowerCamelCase_ ( self ) -> Optional[Any]:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , reshape(snake_case , (4, 3) ).numpy() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , reshape(snake_case , (12, 5) ).numpy() ) )
@require_tf
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , reshape(snake_case , (4, 3) ).numpy() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , reshape(snake_case , (12, 5) ).numpy() ) )
@require_flax
def lowerCamelCase_ ( self ) -> Tuple:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , np.asarray(reshape(snake_case , (4, 3) ) ) ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , np.asarray(reshape(snake_case , (12, 5) ) ) ) )
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(snake_case ) , np.squeeze(snake_case ) ) )
_UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , np.squeeze(snake_case , axis=2 ) ) )
@require_torch
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = np.random.randn(1 , 3 , 4 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(squeeze(snake_case ) , squeeze(snake_case ).numpy() ) )
_UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , squeeze(snake_case , axis=2 ).numpy() ) )
@require_tf
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = np.random.randn(1 , 3 , 4 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(squeeze(snake_case ) , squeeze(snake_case ).numpy() ) )
_UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , squeeze(snake_case , axis=2 ).numpy() ) )
@require_flax
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = np.random.randn(1 , 3 , 4 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(squeeze(snake_case ) , np.asarray(squeeze(snake_case ) ) ) )
_UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , np.asarray(squeeze(snake_case , axis=2 ) ) ) )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , np.expand_dims(snake_case , axis=1 ) ) )
@require_torch
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , expand_dims(snake_case , axis=1 ).numpy() ) )
@require_tf
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , expand_dims(snake_case , axis=1 ).numpy() ) )
@require_flax
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , np.asarray(expand_dims(snake_case , axis=1 ) ) ) )
| 24 | 0 |
from __future__ import annotations
from PIL import Image
# Define glider example
lowercase = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
lowercase = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def UpperCAmelCase ( A : list[list[int]] ):
'''simple docstring'''
_UpperCAmelCase = []
for i in range(len(A ) ):
_UpperCAmelCase = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
_UpperCAmelCase = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(A ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(A ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(A ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
_UpperCAmelCase = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (
                not alive and neighbour_count == 3
            ):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(A )
return next_generation
def UpperCAmelCase ( A : list[list[int]] , A : int ):
'''simple docstring'''
_UpperCAmelCase = []
for _ in range(A ):
# Create output image
_UpperCAmelCase = Image.new('RGB' , (len(cells[0] ), len(A )) )
_UpperCAmelCase = img.load()
# Save cells to image
for x in range(len(A ) ):
for y in range(len(cells[0] ) ):
_UpperCAmelCase = 255 - cells[y][x] * 255
_UpperCAmelCase = (colour, colour, colour)
# Save image
images.append(A )
_UpperCAmelCase = new_generation(A )
return images
if __name__ == "__main__":
lowercase = generate_images(GLIDER, 16)
images[0].save('''out.gif''', save_all=True, append_images=images[1:])
| 719 |
"""simple docstring"""
import os
def UpperCAmelCase ( ):
'''simple docstring'''
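    # Sum the large integers listed in num.txt and return the first ten digits of the total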
_UpperCAmelCase = os.path.join(os.path.dirname(A ) , 'num.txt' )
with open(A ) as file_hand:
return str(sum(int(A ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
| 24 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase_ ( self ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self ) -> List[Any]:
_UpperCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
_UpperCAmelCase = sd_pipe.to(snake_case )
sd_pipe.set_progress_bar_config(disable=snake_case )
sd_pipe.set_scheduler('sample_euler' )
_UpperCAmelCase = 'A painting of a squirrel eating a burger'
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = sd_pipe([prompt] , generator=snake_case , guidance_scale=9.0 , num_inference_steps=20 , output_type='np' )
_UpperCAmelCase = output.images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_UpperCAmelCase = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
_UpperCAmelCase = sd_pipe.to(snake_case )
sd_pipe.set_progress_bar_config(disable=snake_case )
sd_pipe.set_scheduler('sample_euler' )
_UpperCAmelCase = 'A painting of a squirrel eating a burger'
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = sd_pipe([prompt] , generator=snake_case , guidance_scale=9.0 , num_inference_steps=20 , output_type='np' )
_UpperCAmelCase = output.images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_UpperCAmelCase = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
_UpperCAmelCase = sd_pipe.to(snake_case )
sd_pipe.set_progress_bar_config(disable=snake_case )
sd_pipe.set_scheduler('sample_dpmpp_2m' )
_UpperCAmelCase = 'A painting of a squirrel eating a burger'
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = sd_pipe(
[prompt] , generator=snake_case , guidance_scale=7.5 , num_inference_steps=15 , output_type='np' , use_karras_sigmas=snake_case , )
_UpperCAmelCase = output.images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_UpperCAmelCase = np.array(
[0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 720 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
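# Lazy module: tokenizer and framework-specific modeling imports resolve only on first access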
lowercase = {
'''configuration_roberta''': ['''ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RobertaConfig''', '''RobertaOnnxConfig'''],
'''tokenization_roberta''': ['''RobertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''RobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaForCausalLM''',
'''RobertaForMaskedLM''',
'''RobertaForMultipleChoice''',
'''RobertaForQuestionAnswering''',
'''RobertaForSequenceClassification''',
'''RobertaForTokenClassification''',
'''RobertaModel''',
'''RobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaForCausalLM''',
'''TFRobertaForMaskedLM''',
'''TFRobertaForMultipleChoice''',
'''TFRobertaForQuestionAnswering''',
'''TFRobertaForSequenceClassification''',
'''TFRobertaForTokenClassification''',
'''TFRobertaMainLayer''',
'''TFRobertaModel''',
'''TFRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''FlaxRobertaForCausalLM''',
'''FlaxRobertaForMaskedLM''',
'''FlaxRobertaForMultipleChoice''',
'''FlaxRobertaForQuestionAnswering''',
'''FlaxRobertaForSequenceClassification''',
'''FlaxRobertaForTokenClassification''',
'''FlaxRobertaModel''',
'''FlaxRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 24 | 0 |
"""simple docstring"""
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
lowercase = {
'''n_samples''': 64,
'''horizon''': 32,
'''num_inference_steps''': 20,
'''n_guide_steps''': 2, # can set to 0 for faster sampling, does not use value network
'''scale_grad_by_std''': True,
'''scale''': 0.1,
'''eta''': 0.0,
'''t_grad_cutoff''': 2,
'''device''': '''cpu''',
}
if __name__ == "__main__":
lowercase = '''hopper-medium-v2'''
lowercase = gym.make(env_name)
lowercase = ValueGuidedRLPipeline.from_pretrained(
'''bglick13/hopper-medium-v2-value-function-hor32''',
env=env,
)
env.seed(0)
lowercase = env.reset()
lowercase = 0
lowercase = 0
lowercase = 10_00
lowercase = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
lowercase = pipeline(obs, planning_horizon=32)
# execute action in environment
lowercase , lowercase , lowercase , lowercase = env.step(denorm_actions)
lowercase = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'''
F''' {total_score}'''
)
# save observations for rendering
rollout.append(next_observation.copy())
lowercase = next_observation
except KeyboardInterrupt:
pass
print(F'''Total reward: {total_reward}''')
| 721 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
lowercase = logging.get_logger(__name__)
class lowercase__ ( A ):
'''simple docstring'''
def __init__( self , *snake_case , **snake_case ) -> None:
warnings.warn(
'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use YolosImageProcessor instead.' , snake_case , )
super().__init__(*snake_case , **snake_case )
| 24 | 0 |
"""simple docstring"""
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def UpperCAmelCase ( A : str , A : complex , A : str = "x" , A : float = 10**-10 , A : int = 1 , ):
'''simple docstring'''
_UpperCAmelCase = symbols(A )
_UpperCAmelCase = lambdify(A , A )
_UpperCAmelCase = lambdify(A , diff(A , A ) )
_UpperCAmelCase = starting_point
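    # Iterate x_new = x - multiplicity * f(x) / f'(x) until two consecutive guesses agree within the precision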
while True:
if diff_function(A ) != 0:
_UpperCAmelCase = prev_guess - multiplicity * func(A ) / diff_function(
A )
else:
raise ZeroDivisionError('Could not find root' ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
_UpperCAmelCase = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
# Find root of polynomial
# Find fourth Root of 5
print(F'''The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}''')
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
F'''{newton_raphson('log(y) - 1', 2, variable='y')}''',
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
F'''{newton_raphson('exp(x) - 1', 10, precision=0.005)}''',
)
# Find root of cos(x)
print(F'''The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}''')
| 700 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
'''microsoft/beit-base-patch16-224-pt22k''': (
'''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'''
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class lowercase__ ( A ):
'''simple docstring'''
_UpperCAmelCase = '''beit'''
def __init__( self , snake_case=8192 , snake_case=768 , snake_case=12 , snake_case=12 , snake_case=3072 , snake_case="gelu" , snake_case=0.0 , snake_case=0.0 , snake_case=0.02 , snake_case=1E-12 , snake_case=224 , snake_case=16 , snake_case=3 , snake_case=False , snake_case=False , snake_case=False , snake_case=False , snake_case=0.1 , snake_case=0.1 , snake_case=True , snake_case=[3, 5, 7, 11] , snake_case=[1, 2, 3, 6] , snake_case=True , snake_case=0.4 , snake_case=256 , snake_case=1 , snake_case=False , snake_case=255 , **snake_case , ) -> str:
super().__init__(**snake_case )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = use_mask_token
_UpperCAmelCase = use_absolute_position_embeddings
_UpperCAmelCase = use_relative_position_bias
_UpperCAmelCase = use_shared_relative_position_bias
_UpperCAmelCase = layer_scale_init_value
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = use_mean_pooling
# decode head attributes (semantic segmentation)
_UpperCAmelCase = out_indices
_UpperCAmelCase = pool_scales
# auxiliary head attributes (semantic segmentation)
_UpperCAmelCase = use_auxiliary_head
_UpperCAmelCase = auxiliary_loss_weight
_UpperCAmelCase = auxiliary_channels
_UpperCAmelCase = auxiliary_num_convs
_UpperCAmelCase = auxiliary_concat_input
_UpperCAmelCase = semantic_loss_ignore_index
class lowercase__ ( A ):
'''simple docstring'''
_UpperCAmelCase = version.parse('''1.11''' )
@property
def lowerCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def lowerCamelCase_ ( self ) -> float:
return 1E-4
| 24 | 0 |
"""simple docstring"""
import random
def UpperCAmelCase ( A : list , A : List[Any] ):
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = [], [], []
for element in data:
if element < pivot:
less.append(A )
elif element > pivot:
greater.append(A )
else:
equal.append(A )
return less, equal, greater
def UpperCAmelCase ( A : list , A : int ):
'''simple docstring'''
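    # Quickselect: expected O(n) selection of the index-th smallest element using a random pivot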
if index >= len(A ) or index < 0:
return None
_UpperCAmelCase = items[random.randint(0 , len(A ) - 1 )]
_UpperCAmelCase = 0
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = _partition(A , A )
_UpperCAmelCase = len(A )
_UpperCAmelCase = len(A )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(A , A )
# must be in larger
else:
return quick_select(A , index - (m + count) )
| 701 |
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
lowercase = logging.getLogger(__name__)
if __name__ == "__main__":
lowercase = argparse.ArgumentParser(
description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
)
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
parser.add_argument('''--vocab_size''', default=3_05_22, type=int)
lowercase = parser.parse_args()
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file, '''rb''') as fp:
lowercase = pickle.load(fp)
logger.info('''Counting occurrences for MLM.''')
lowercase = Counter()
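    # Tally how often every token id occurs across the binarized corpus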
for tk_ids in data:
counter.update(tk_ids)
lowercase = [0] * args.vocab_size
for k, v in counter.items():
lowercase = v
logger.info(F'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, '''wb''') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 24 | 0 |
"""simple docstring"""
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase__ ( A, unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = FunnelTokenizer
_UpperCAmelCase = FunnelTokenizerFast
_UpperCAmelCase = True
_UpperCAmelCase = True
def lowerCamelCase_ ( self ) -> int:
super().setUp()
_UpperCAmelCase = [
'<unk>',
'<cls>',
'<sep>',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def lowerCamelCase_ ( self , **snake_case ) -> Any:
return FunnelTokenizer.from_pretrained(self.tmpdirname , **snake_case )
def lowerCamelCase_ ( self , **snake_case ) -> Union[str, Any]:
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **snake_case )
def lowerCamelCase_ ( self , snake_case ) -> int:
_UpperCAmelCase = 'UNwant\u00E9d,running'
_UpperCAmelCase = 'unwanted, running'
return input_text, output_text
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = self.tokenizer_class(self.vocab_file )
_UpperCAmelCase = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(snake_case , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case ) , [7, 4, 5, 10, 8, 9] )
def lowerCamelCase_ ( self ) -> Optional[Any]:
_UpperCAmelCase = self.get_tokenizers(do_lower_case=snake_case )
for tokenizer in tokenizers:
_UpperCAmelCase = tokenizer('UNwant\u00E9d,running' )
_UpperCAmelCase = len(inputs['input_ids'] ) - 1
self.assertListEqual(inputs['token_type_ids'] , [2] + [0] * sentence_len )
_UpperCAmelCase = tokenizer('UNwant\u00E9d,running' , 'UNwant\u00E9d,running' )
self.assertListEqual(inputs['token_type_ids'] , [2] + [0] * sentence_len + [1] * sentence_len )
| 702 |
"""simple docstring"""
from itertools import permutations
def UpperCAmelCase ( A : tuple ):
'''simple docstring'''
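    # Substring-divisibility tests: d4 even, d3+d4+d5 divisible by 3, d6 divisible by 5, then sliding triples by 7, 11, 13, 17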
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
_UpperCAmelCase = [7, 11, 13, 17]
for i, test in enumerate(A ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def UpperCAmelCase ( A : int = 10 ):
'''simple docstring'''
return sum(
int(''.join(map(A , A ) ) )
for num in permutations(range(A ) )
if is_substring_divisible(A ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 24 | 0 |
"""simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class lowercase__ ( AbstractDatasetReader ):
    '''simple docstring'''
    def __init__( self , df , split = None , features = None , streaming = True , cache_dir = None , keep_in_memory = False , working_dir = None , load_from_cache_file = True , file_format = "arrow" , **kwargs , ):
        super().__init__(
            split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , **kwargs , )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df , features=features , cache_dir=cache_dir , working_dir=working_dir , **kwargs , )
    def read ( self ):
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
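# Minimal usage sketch (illustrative; this class corresponds to the upstream
# `SparkDatasetReader`, and the sketch assumes a local SparkSession):
#
#   from pyspark.sql import SparkSession
#   spark = SparkSession.builder.master('local[*]').getOrCreate()
#   df = spark.createDataFrame([{'text': 'hello'}, {'text': 'world'}])
#   ds = lowercase__(df, cache_dir='/tmp/spark_cache').read()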
| 703 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
'''tokenization_mvp''': ['''MvpTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_mvp_fast'''] = ['''MvpTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mvp'''] = [
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
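# How the pattern above behaves (illustrative note): importing this package is
# cheap because _LazyModule defers each submodule import until one of its names
# is actually touched, e.g.
#
#   from transformers.models.mvp import MvpConfig   # triggers configuration_mvp only here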
| 24 | 0 |
"""simple docstring"""
def euclidean_gcd ( a : int , b : int ) -> int:
    '''simple docstring'''
    while b:
        a , b = b, a % b
    return a
def euclidean_gcd_recursive ( a : int , b : int ) -> int:
    '''simple docstring'''
    return a if b == 0 else euclidean_gcd_recursive(b , a % b )
def main ( ) -> None:
'''simple docstring'''
print(f'euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}' )
print(f'euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}' )
print(f'euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}' )
print(f'euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}' )
print(f'euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}' )
print(f'euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}' )
print(f'euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}' )
print(f'euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}' )
if __name__ == "__main__":
main()
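    # Quick property check (illustrative): both implementations agree and the
    # result divides each argument.
    assert euclidean_gcd(48, 18) == euclidean_gcd_recursive(48, 18) == 6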
| 704 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_clipseg'''] = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 24 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
lowercase = logging.get_logger(__name__)
def convert_classification ( base_model_name , hf_config , downstream_dict ):
    '''simple docstring'''
    model = WavaVecaForSequenceClassification.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict['projector.weight']
    model.projector.bias.data = downstream_dict['projector.bias']
    model.classifier.weight.data = downstream_dict['model.post_net.linear.weight']
    model.classifier.bias.data = downstream_dict['model.post_net.linear.bias']
    return model
def convert_diarization ( base_model_name , hf_config , downstream_dict ):
    '''simple docstring'''
    model = WavaVecaForAudioFrameClassification.from_pretrained(base_model_name , config=hf_config )
    model.classifier.weight.data = downstream_dict['model.linear.weight']
    model.classifier.bias.data = downstream_dict['model.linear.bias']
    return model
def convert_xvector ( base_model_name , hf_config , downstream_dict ):
    '''simple docstring'''
    model = WavaVecaForXVector.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict['connector.weight']
    model.projector.bias.data = downstream_dict['connector.bias']
    for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f'model.framelevel_feature_extractor.module.{i}.kernel.weight'
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f'model.framelevel_feature_extractor.module.{i}.kernel.bias']
    model.feature_extractor.weight.data = downstream_dict['model.utterancelevel_feature_extractor.linear1.weight']
    model.feature_extractor.bias.data = downstream_dict['model.utterancelevel_feature_extractor.linear1.bias']
    model.classifier.weight.data = downstream_dict['model.utterancelevel_feature_extractor.linear2.weight']
    model.classifier.bias.data = downstream_dict['model.utterancelevel_feature_extractor.linear2.bias']
    model.objective.weight.data = downstream_dict['objective.W']
    return model
@torch.no_grad()
def convert_saprl_checkpoint ( base_model_name , config_path , checkpoint_path , model_dump_path ):
    '''simple docstring'''
    checkpoint = torch.load(checkpoint_path , map_location='cpu' )
    downstream_dict = checkpoint['Downstream']
    hf_config = WavaVecaConfig.from_pretrained(config_path )
    hf_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        base_model_name , return_attention_mask=True , do_normalize=False )
    arch = hf_config.architectures[0]
    if arch.endswith('ForSequenceClassification' ):
        hf_model = convert_classification(base_model_name , hf_config , downstream_dict )
    elif arch.endswith('ForAudioFrameClassification' ):
        hf_model = convert_diarization(base_model_name , hf_config , downstream_dict )
    elif arch.endswith('ForXVector' ):
        hf_model = convert_xvector(base_model_name , hf_config , downstream_dict )
    else:
        raise NotImplementedError(f'S3PRL weights conversion is not supported for {arch}' )
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint['Featurizer']['weights']
    hf_feature_extractor.save_pretrained(model_dump_path )
    hf_model.save_pretrained(model_dump_path )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
lowercase = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
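# Example invocation (illustrative values only, not shipped with the script):
#
#   python <this_script>.py \
#       --base_model_name facebook/wav2vec2-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_task.ckpt \
#       --model_dump_path ./converted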
| 705 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowercase = logging.get_logger(__name__)
lowercase = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig ( BackboneConfigMixin, PretrainedConfig ):
    '''simple docstring'''
    model_type = '''swin'''
    attribute_map = {
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }
    def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1E-5 , encoder_stride=32 , out_features=None , out_indices=None , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
class SwinOnnxConfig ( OnnxConfig ):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse('''1.11''' )
@property
def lowerCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def lowerCamelCase_ ( self ) -> float:
return 1E-4
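if __name__ == "__main__":
    # Derived-attribute sketch (illustrative): with the default depths [2, 2, 6, 2]
    # and embed_dim 96, hidden_size is int(96 * 2 ** 3) = 768.
    cfg = SwinConfig()
    print(cfg.hidden_size)   # 768
    print(cfg.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']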
| 24 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase_ ( self ) -> Optional[Any]:
        vocab = '| <pad> <unk> <s> </s> a b c d e f g h i j k'.split()
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.add_kwargs_tokens_map = {
            'unk_token': '<unk>',
            'bos_token': '<s>',
            'eos_token': '</s>',
        }
        feature_extractor_map = {
            'feature_size': 1,
            'padding_value': 0.0,
            'sampling_rate': 16000,
            'return_attention_mask': False,
            'do_normalize': True,
        }
        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.feature_extraction_file = os.path.join(self.tmpdirname , FEATURE_EXTRACTOR_NAME )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
        with open(self.feature_extraction_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(feature_extractor_map ) + '\n' )
        # load decoder from hub
        self.decoder_name = 'hf-internal-testing/ngram-beam-search-decoder'
    def lowerCamelCase_ ( self , **kwargs_init ) -> Tuple:
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init )
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **kwargs )
def lowerCamelCase_ ( self , **snake_case ) -> int:
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **snake_case )
def lowerCamelCase_ ( self , **snake_case ) -> int:
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **snake_case )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_feature_extractor()
_UpperCAmelCase = self.get_decoder()
_UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=snake_case , feature_extractor=snake_case , decoder=snake_case )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , snake_case )
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
_UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['xx'] )
with self.assertRaisesRegex(snake_case , 'include' ):
WavaVecaProcessorWithLM(
tokenizer=snake_case , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = self.get_feature_extractor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_decoder()
_UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=snake_case , feature_extractor=snake_case , decoder=snake_case )
_UpperCAmelCase = floats_list((3, 1000) )
_UpperCAmelCase = feature_extractor(snake_case , return_tensors='np' )
_UpperCAmelCase = processor(snake_case , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = self.get_feature_extractor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_decoder()
_UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=snake_case , feature_extractor=snake_case , decoder=snake_case )
_UpperCAmelCase = 'This is a test string'
_UpperCAmelCase = processor(text=snake_case )
_UpperCAmelCase = tokenizer(snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCamelCase_ ( self , snake_case=(2, 10, 16) , snake_case=77 ) -> Optional[Any]:
np.random.seed(snake_case )
return np.random.rand(*snake_case )
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = self.get_feature_extractor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_decoder()
_UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=snake_case , feature_extractor=snake_case , decoder=snake_case )
_UpperCAmelCase = self._get_dummy_logits(shape=(10, 16) , seed=13 )
_UpperCAmelCase = processor.decode(snake_case )
_UpperCAmelCase = decoder.decode_beams(snake_case )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('</s> <s> </s>' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['fork'], ['spawn']] )
def lowerCamelCase_ ( self , snake_case ) -> List[Any]:
_UpperCAmelCase = self.get_feature_extractor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_decoder()
_UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=snake_case , feature_extractor=snake_case , decoder=snake_case )
_UpperCAmelCase = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
_UpperCAmelCase = processor.batch_decode(snake_case )
else:
with get_context(snake_case ).Pool() as pool:
_UpperCAmelCase = processor.batch_decode(snake_case , snake_case )
_UpperCAmelCase = list(snake_case )
with get_context('fork' ).Pool() as p:
_UpperCAmelCase = decoder.decode_beams_batch(snake_case , snake_case )
        texts_decoder , logit_scores_decoder , lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0] )
            logit_scores_decoder.append(beams[0][-2] )
            lm_scores_decoder.append(beams[0][-1] )
        self.assertListEqual(texts_decoder , decoded_processor.text )
        self.assertListEqual(['<s> <s> </s>', '<s> <s> <s>'] , decoded_processor.text )
        self.assertListEqual(logit_scores_decoder , decoded_processor.logit_score )
        self.assertListEqual(lm_scores_decoder , decoded_processor.lm_score )
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = self.get_feature_extractor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_decoder()
_UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=snake_case , feature_extractor=snake_case , decoder=snake_case )
_UpperCAmelCase = self._get_dummy_logits()
_UpperCAmelCase = 15
_UpperCAmelCase = -20.0
_UpperCAmelCase = -4.0
_UpperCAmelCase = processor.batch_decode(
snake_case , beam_width=snake_case , beam_prune_logp=snake_case , token_min_logp=snake_case , )
_UpperCAmelCase = decoded_processor_out.text
_UpperCAmelCase = list(snake_case )
with get_context('fork' ).Pool() as pool:
_UpperCAmelCase = decoder.decode_beams_batch(
snake_case , snake_case , beam_width=snake_case , beam_prune_logp=snake_case , token_min_logp=snake_case , )
_UpperCAmelCase = [d[0][0] for d in decoded_decoder_out]
_UpperCAmelCase = [d[0][2] for d in decoded_decoder_out]
_UpperCAmelCase = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(snake_case , snake_case )
self.assertListEqual(['</s> <s> <s>', '<s> <s> <s>'] , snake_case )
self.assertTrue(np.array_equal(snake_case , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , snake_case , atol=1E-3 ) )
self.assertTrue(np.array_equal(snake_case , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , snake_case , atol=1E-3 ) )
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = self.get_feature_extractor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_decoder()
_UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=snake_case , feature_extractor=snake_case , decoder=snake_case )
_UpperCAmelCase = self._get_dummy_logits()
_UpperCAmelCase = 2.0
_UpperCAmelCase = 5.0
_UpperCAmelCase = -20.0
_UpperCAmelCase = True
_UpperCAmelCase = processor.batch_decode(
snake_case , alpha=snake_case , beta=snake_case , unk_score_offset=snake_case , lm_score_boundary=snake_case , )
_UpperCAmelCase = decoded_processor_out.text
_UpperCAmelCase = list(snake_case )
decoder.reset_params(
alpha=snake_case , beta=snake_case , unk_score_offset=snake_case , lm_score_boundary=snake_case , )
with get_context('fork' ).Pool() as pool:
_UpperCAmelCase = decoder.decode_beams_batch(
snake_case , snake_case , )
_UpperCAmelCase = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(snake_case , snake_case )
self.assertListEqual(['<s> </s> <s> </s> </s>', '</s> </s> <s> </s> </s>'] , snake_case )
_UpperCAmelCase = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , snake_case )
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
_UpperCAmelCase = processor.decoder.model_container[processor.decoder._model_key]
_UpperCAmelCase = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
_UpperCAmelCase = os.listdir(snake_case )
_UpperCAmelCase = ['alphabet.json', 'language_model']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(snake_case , snake_case )
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = snapshot_download('hf-internal-testing/processor_with_lm' )
_UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained(snake_case )
_UpperCAmelCase = processor.decoder.model_container[processor.decoder._model_key]
_UpperCAmelCase = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
_UpperCAmelCase = os.listdir(snake_case )
_UpperCAmelCase = os.listdir(snake_case )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(snake_case , snake_case )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
_UpperCAmelCase = AutoProcessor.from_pretrained('hf-internal-testing/processor_with_lm' )
_UpperCAmelCase = floats_list((3, 1000) )
_UpperCAmelCase = processor_wavaveca(snake_case , return_tensors='np' )
_UpperCAmelCase = processor_auto(snake_case , return_tensors='np' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
_UpperCAmelCase = self._get_dummy_logits()
_UpperCAmelCase = processor_wavaveca.batch_decode(snake_case )
_UpperCAmelCase = processor_auto.batch_decode(snake_case )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def lowerCamelCase_ ( self ) -> Optional[Any]:
_UpperCAmelCase = self.get_feature_extractor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_decoder()
_UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=snake_case , feature_extractor=snake_case , decoder=snake_case )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
@staticmethod
def lowerCamelCase_ ( snake_case , snake_case ) -> Dict:
_UpperCAmelCase = [d[key] for d in offsets]
return retrieved_list
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
_UpperCAmelCase = self._get_dummy_logits()[0]
_UpperCAmelCase = processor.decode(snake_case , output_word_offsets=snake_case )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(snake_case , snake_case ) )
self.assertEqual(' '.join(self.get_from_offsets(outputs['word_offsets'] , 'word' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'end_offset' ) , [1, 3, 5] )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
_UpperCAmelCase = self._get_dummy_logits()
_UpperCAmelCase = processor.batch_decode(snake_case , output_word_offsets=snake_case )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(snake_case , snake_case ) )
self.assertListEqual(
[' '.join(self.get_from_offsets(snake_case , 'word' ) ) for o in outputs['word_offsets']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'end_offset' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def lowerCamelCase_ ( self ) -> List[str]:
import torch
_UpperCAmelCase = load_dataset('common_voice' , 'en' , split='train' , streaming=snake_case )
_UpperCAmelCase = ds.cast_column('audio' , datasets.Audio(sampling_rate=16000 ) )
_UpperCAmelCase = iter(snake_case )
_UpperCAmelCase = next(snake_case )
_UpperCAmelCase = AutoProcessor.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
_UpperCAmelCase = WavaVecaForCTC.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
_UpperCAmelCase = processor(sample['audio']['array'] , return_tensors='pt' ).input_values
with torch.no_grad():
_UpperCAmelCase = model(snake_case ).logits.cpu().numpy()
_UpperCAmelCase = processor.decode(logits[0] , output_word_offsets=snake_case )
_UpperCAmelCase = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
_UpperCAmelCase = [
{
'start_time': d['start_offset'] * time_offset,
'end_time': d['end_offset'] * time_offset,
'word': d['word'],
}
for d in output['word_offsets']
]
_UpperCAmelCase = 'WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'
# output words
self.assertEqual(' '.join(self.get_from_offsets(snake_case , 'word' ) ) , snake_case )
self.assertEqual(' '.join(self.get_from_offsets(snake_case , 'word' ) ) , output.text )
# output times
_UpperCAmelCase = torch.tensor(self.get_from_offsets(snake_case , 'start_time' ) )
_UpperCAmelCase = torch.tensor(self.get_from_offsets(snake_case , 'end_time' ) )
# fmt: off
_UpperCAmelCase = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
_UpperCAmelCase = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(snake_case , snake_case , atol=0.01 ) )
self.assertTrue(torch.allclose(snake_case , snake_case , atol=0.01 ) )
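# Usage sketch (illustrative): the multiprocessing pattern exercised above --
# create the pool *after* instantiating the processor so the LM is visible to
# the worker processes (`logits` stands for a numpy array of CTC logits):
#
#   processor = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm')
#   with get_context('fork').Pool(2) as pool:
#       transcripts = processor.batch_decode(logits, pool).text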
| 706 |
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class lowercase__ ( nn.Module ):
'''simple docstring'''
    def __init__( self , num_attention_heads = 16 , attention_head_dim = 88 , in_channels = None , num_layers = 1 , dropout = 0.0 , norm_num_groups = 32 , cross_attention_dim = None , attention_bias = False , sample_size = None , num_vector_embeds = None , activation_fn = "geglu" , num_embeds_ada_norm = None , ) -> None:
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=num_attention_heads , attention_head_dim=attention_head_dim , in_channels=in_channels , num_layers=num_layers , dropout=dropout , norm_num_groups=norm_num_groups , cross_attention_dim=cross_attention_dim , attention_bias=attention_bias , sample_size=sample_size , num_vector_embeds=num_vector_embeds , activation_fn=activation_fn , num_embeds_ada_norm=num_embeds_ada_norm , )
                for _ in range(2 )
            ] )
        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5
        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]
        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]
    def forward ( self , hidden_states , encoder_hidden_states , timestep=None , attention_mask=None , cross_attention_kwargs=None , return_dict = True , ) -> Any:
        input_states = hidden_states
        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2 ):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states , encoder_hidden_states=condition_state , timestep=timestep , cross_attention_kwargs=cross_attention_kwargs , return_dict=False , )[0]
            encoded_states.append(encoded_state - input_states )
            tokens_start += self.condition_lengths[i]
        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states
        if not return_dict:
            return (output_states,)
        return TransformeraDModelOutput(sample=output_states )
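if __name__ == "__main__":
    # Blend arithmetic demo (illustrative): how mix_ratio combines the two
    # residual streams before the skip connection, on toy tensors.
    import torch
    stream_a, stream_b, mix_ratio = torch.ones(2, 4), torch.zeros(2, 4), 0.5
    print(stream_a * mix_ratio + stream_b * (1 - mix_ratio))  # elementwise average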
| 24 | 0 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)
def preprocess ( image ):
    '''simple docstring'''
    if isinstance(image , torch.Tensor ):
        return image
    elif isinstance(image , PIL.Image.Image ):
        image = [image]
    image = [trans(img.convert('RGB' ) ) for img in image]
    image = torch.stack(image )
    return image
class lowercase__ ( DiffusionPipeline ):
'''simple docstring'''
def __init__( self , snake_case , snake_case ) -> List[Any]:
super().__init__()
# make sure scheduler can always be converted to DDIM
_UpperCAmelCase = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=snake_case , scheduler=snake_case )
def lowerCamelCase_ ( self , snake_case ) -> Optional[Any]:
if strength < 0 or strength > 1:
raise ValueError(f'The value of strength should in [0.0, 1.0] but is {strength}' )
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> str:
# get the original timestep using init_timestep
_UpperCAmelCase = min(int(num_inference_steps * strength ) , snake_case )
_UpperCAmelCase = max(num_inference_steps - init_timestep , 0 )
_UpperCAmelCase = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case=None ) -> List[str]:
if not isinstance(snake_case , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(snake_case )}' )
_UpperCAmelCase = image.to(device=snake_case , dtype=snake_case )
if isinstance(snake_case , snake_case ) and len(snake_case ) != batch_size:
raise ValueError(
f'You have passed a list of generators of length {len(snake_case )}, but requested an effective batch'
f' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
_UpperCAmelCase = init_latents.shape
_UpperCAmelCase = randn_tensor(snake_case , generator=snake_case , device=snake_case , dtype=snake_case )
# get latents
print('add noise to latents at timestep' , snake_case )
_UpperCAmelCase = self.scheduler.add_noise(snake_case , snake_case , snake_case )
_UpperCAmelCase = init_latents
return latents
@torch.no_grad()
def __call__( self , snake_case = None , snake_case = 0.8 , snake_case = 1 , snake_case = None , snake_case = 0.0 , snake_case = 50 , snake_case = None , snake_case = "pil" , snake_case = True , ) -> Union[ImagePipelineOutput, Tuple]:
self.check_inputs(snake_case )
# 2. Preprocess image
_UpperCAmelCase = preprocess(snake_case )
# 3. set timesteps
self.scheduler.set_timesteps(snake_case , device=self.device )
_UpperCAmelCase , _UpperCAmelCase = self.get_timesteps(snake_case , snake_case , self.device )
_UpperCAmelCase = timesteps[:1].repeat(snake_case )
# 4. Prepare latent variables
_UpperCAmelCase = self.prepare_latents(snake_case , snake_case , snake_case , self.unet.dtype , self.device , snake_case )
_UpperCAmelCase = latents
# 5. Denoising loop
for t in self.progress_bar(snake_case ):
# 1. predict noise model_output
_UpperCAmelCase = self.unet(snake_case , snake_case ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to ฮท in paper and should be between [0, 1]
# do x_t -> x_t-1
_UpperCAmelCase = self.scheduler.step(
snake_case , snake_case , snake_case , eta=snake_case , use_clipped_model_output=snake_case , generator=snake_case , ).prev_sample
_UpperCAmelCase = (image / 2 + 0.5).clamp(0 , 1 )
_UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_UpperCAmelCase = self.numpy_to_pil(snake_case )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=snake_case )
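if __name__ == "__main__":
    # Timestep arithmetic sketch (illustrative): with num_inference_steps=50 and
    # strength=0.8, get_timesteps keeps the last 40 scheduler steps, so the input
    # image is noised to step index 10 and denoised over the remaining 40 steps.
    num_inference_steps, strength = 50, 0.8
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
    t_start = max(num_inference_steps - init_timestep, 0)
    print(init_timestep, t_start)  # 40 10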
| 707 |
"""simple docstring"""
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase__ ( ConfigTester ):
    '''simple docstring'''
    def lowerCamelCase_ ( self ) -> int:
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , 'embed_dim' ) )
        self.parent.assertTrue(hasattr(config , 'num_heads' ) )
class lowercase__ :
'''simple docstring'''
def __init__( self , snake_case , snake_case=13 , snake_case=64 , snake_case=3 , snake_case=[16, 48, 96] , snake_case=[1, 3, 6] , snake_case=[1, 2, 10] , snake_case=[7, 3, 3] , snake_case=[4, 2, 2] , snake_case=[2, 1, 1] , snake_case=[2, 2, 2] , snake_case=[False, False, True] , snake_case=[0.0, 0.0, 0.0] , snake_case=0.02 , snake_case=1E-12 , snake_case=True , snake_case=True , snake_case=2 , ) -> Tuple:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_sizes
_UpperCAmelCase = patch_stride
_UpperCAmelCase = patch_padding
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_channels
_UpperCAmelCase = embed_dim
_UpperCAmelCase = num_heads
_UpperCAmelCase = stride_kv
_UpperCAmelCase = depth
_UpperCAmelCase = cls_token
_UpperCAmelCase = attention_drop_rate
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self ) -> List[str]:
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Optional[int]:
_UpperCAmelCase = CvtModel(config=snake_case )
model.to(snake_case )
model.eval()
_UpperCAmelCase = model(snake_case )
_UpperCAmelCase = (self.image_size, self.image_size)
        height , width = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
_UpperCAmelCase = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
_UpperCAmelCase = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Optional[Any]:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = CvtForImageClassification(snake_case )
model.to(snake_case )
model.eval()
_UpperCAmelCase = model(snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowercase__ ( ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'''feature-extraction''': CvtModel, '''image-classification''': CvtForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = CvtModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=37 )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase_ ( self ) -> Union[str, Any]:
return
@unittest.skip(reason='Cvt does not output attentions' )
def lowerCamelCase_ ( self ) -> str:
pass
@unittest.skip(reason='Cvt does not use inputs_embeds' )
def lowerCamelCase_ ( self ) -> int:
pass
@unittest.skip(reason='Cvt does not support input and output embeddings' )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
pass
def lowerCamelCase_ ( self ) -> Any:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(snake_case )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case )
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def lowerCamelCase_ ( self ) -> Optional[int]:
def check_hidden_states_output(snake_case , snake_case , snake_case ):
_UpperCAmelCase = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(snake_case , snake_case ) )
_UpperCAmelCase = outputs.hidden_states
_UpperCAmelCase = len(self.model_tester.depth )
self.assertEqual(len(snake_case ) , snake_case )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = True
check_hidden_states_output(snake_case , snake_case , snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
check_hidden_states_output(snake_case , snake_case , snake_case )
def lowerCamelCase_ ( self ) -> Any:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCamelCase_ ( self ) -> Dict:
pass
@slow
def lowerCamelCase_ ( self ) -> Dict:
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = CvtModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def UpperCAmelCase ( ):
'''simple docstring'''
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCamelCase_ ( self ) -> List[Any]:
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(snake_case )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=snake_case , return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**snake_case )
# verify the logits
_UpperCAmelCase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , snake_case )
_UpperCAmelCase = torch.tensor([0.9285, 0.9015, -0.3150] ).to(snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1E-4 ) )
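if __name__ == "__main__":
    # Spatial-size check (illustrative): the conv-embedding formula used in
    # create_and_check_model above, for image 64, patch 7, stride 4, padding 2:
    print(floor(((64 + 2 * 2 - 7) / 4) + 1))  # 16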
| 24 | 0 |
"""simple docstring"""
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
lowercase = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig ( folder_based_builder.FolderBasedBuilderConfig ):
    '''simple docstring'''
    drop_labels = None
    drop_metadata = None
class AudioFolder ( folder_based_builder.FolderBasedBuilder ):
    '''simple docstring'''
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = '''audio'''
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column='''audio''', label_column='''label''' )
AUDIO_EXTENSIONS = [
'''.aiff''',
'''.au''',
'''.avr''',
'''.caf''',
'''.flac''',
'''.htk''',
'''.svx''',
'''.mat4''',
'''.mat5''',
'''.mpc2k''',
'''.ogg''',
'''.paf''',
'''.pvf''',
'''.raw''',
'''.rf64''',
'''.sd2''',
'''.sds''',
'''.ircam''',
'''.voc''',
'''.w64''',
'''.wav''',
'''.nist''',
'''.wavex''',
'''.wve''',
'''.xi''',
'''.mp3''',
'''.opus''',
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
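# Usage sketch (illustrative): this builder backs `load_dataset('audiofolder', ...)`
# for a directory layout of <label>/<clip>.wav:
#
#   from datasets import load_dataset
#   ds = load_dataset('audiofolder', data_dir='./my_clips')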
| 708 |
"""simple docstring"""
from __future__ import annotations
from cmath import sqrt
def quadratic_roots ( a : int , b : int , c : int ):
    '''simple docstring'''
    if a == 0:
        raise ValueError('Coefficient \'a\' must not be zero.' )
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta )) / (2 * a)
    root_2 = (-b - sqrt(delta )) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )
def main ( ):
    '''simple docstring'''
    solution_1 , solution_2 = quadratic_roots(a=5 , b=6 , c=1 )
    print(f'The solutions are: {solution_1} and {solution_2}' )
if __name__ == "__main__":
main()
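    # Worked check (illustrative): x^2 - 2x + 1 = (x - 1)^2 has the double real
    # root 1; complex roots are returned as complex numbers instead.
    assert quadratic_roots(1, -2, 1) == (1.0, 1.0)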
| 24 | 0 |
"""simple docstring"""
def kth_permutation ( k : int , n : int ):
    '''simple docstring'''
    factorials = [1]
    for i in range(2 , n ):
        factorials.append(factorials[-1] * i )
    assert 0 <= k < factorials[-1] * n, "k out of bounds"
    permutation = []
    elements = list(range(n ) )
    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number , k = divmod(k , factorial )
        permutation.append(elements[number] )
        elements.remove(elements[number] )
    permutation.append(elements[0] )
    return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
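    # Worked example (illustrative): k=1, n=3 walks the factorial number system --
    # divmod(1, 2) picks element 0, then divmod(1, 1) picks index 1 of [1, 2].
    print(kth_permutation(1, 3))  # [0, 2, 1]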
| 709 |
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class lowercase__ ( TokenizerTesterMixin, unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def lowerCamelCase_ ( self ) -> Optional[int]:
super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' )
        tokenizer.save_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname , legacy_format=False )
        self.tokenizer = tokenizer
def lowerCamelCase_ ( self ) -> Dict:
        token = '<pad>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def lowerCamelCase_ ( self ) -> List[str]:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<s>' )
        self.assertEqual(vocab_keys[1] , '<pad>' )
        self.assertEqual(vocab_keys[-1] , '<mask>' )
        self.assertEqual(len(vocab_keys ) , 101122 )
def lowerCamelCase_ ( self ) -> List[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 101122 )
@require_torch
def lowerCamelCase_ ( self ) -> List[str]:
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]
        batch = self.tokenizer(
            src_text , max_length=len(expected_src_tokens ) , padding=True , truncation=True , return_tensors='pt' )
        self.assertIsInstance(batch , BatchEncoding )
        self.assertEqual((2, 6) , batch.input_ids.shape )
        self.assertEqual((2, 6) , batch.attention_mask.shape )
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens , result )
def lowerCamelCase_ ( self ) -> Optional[Any]:
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'I was born in 92000, and this is falsé.'
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
@slow
def lowerCamelCase_ ( self ) -> Optional[int]:
# fmt: off
_UpperCAmelCase = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
_UpperCAmelCase = [
'Le transformeur est un modรจle d\'apprentissage profond introduit en 2017, '
'utilisรฉ principalement dans le domaine du traitement automatique des langues (TAL).',
'ร l\'instar des rรฉseaux de neurones rรฉcurrents (RNN), les transformeurs sont conรงus '
'pour gรฉrer des donnรฉes sรฉquentielles, telles que le langage naturel, pour des tรขches '
'telles que la traduction et la synthรจse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=snake_case , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=snake_case , )
| 24 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowercase__ ( PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase ):
    '''simple docstring'''
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {'''latents'''}
def lowerCamelCase_ ( self ) -> int:
return self._get_dummy_components()
def lowerCamelCase_ ( self , snake_case , snake_case=0 ) -> int:
if str(snake_case ).startswith('mps' ):
_UpperCAmelCase = torch.manual_seed(snake_case )
else:
_UpperCAmelCase = torch.Generator(device=snake_case ).manual_seed(snake_case )
_UpperCAmelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def lowerCamelCase_ ( self ) -> Any:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def lowerCamelCase_ ( self ) -> Optional[int]:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def lowerCamelCase_ ( self ) -> Any:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def lowerCamelCase_ ( self ) -> Any:
self._test_save_load_local()
def lowerCamelCase_ ( self ) -> str:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def lowerCamelCase_ ( self ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase_ ( self ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self ) -> Dict:
# if
_UpperCAmelCase = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0' , variant='fp16' , torch_dtype=torch.floataa )
_UpperCAmelCase = IFSuperResolutionPipeline.from_pretrained(
'DeepFloyd/IF-II-L-v1.0' , variant='fp16' , torch_dtype=torch.floataa , text_encoder=snake_case , tokenizer=snake_case )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('cuda' )
_UpperCAmelCase , _UpperCAmelCase = pipe_a.encode_prompt('anime turtle' , device='cuda' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
_UpperCAmelCase = None
_UpperCAmelCase = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(snake_case , snake_case , snake_case , snake_case )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
_UpperCAmelCase = IFImgaImgPipeline(**pipe_a.components )
_UpperCAmelCase = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(snake_case , snake_case , snake_case , snake_case )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
_UpperCAmelCase = IFInpaintingPipeline(**pipe_a.components )
_UpperCAmelCase = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(snake_case , snake_case , snake_case , snake_case )
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case ) -> Union[str, Any]:
# pipeline 1
_start_torch_memory_measurement()
_UpperCAmelCase = torch.Generator(device='cpu' ).manual_seed(0 )
_UpperCAmelCase = pipe_a(
prompt_embeds=snake_case , negative_prompt_embeds=snake_case , num_inference_steps=2 , generator=snake_case , output_type='np' , )
_UpperCAmelCase = output.images[0]
assert image.shape == (64, 64, 3)
_UpperCAmelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
_UpperCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' )
assert_mean_pixel_difference(snake_case , snake_case )
# pipeline 2
_start_torch_memory_measurement()
_UpperCAmelCase = torch.Generator(device='cpu' ).manual_seed(0 )
_UpperCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(snake_case )
_UpperCAmelCase = pipe_a(
prompt_embeds=snake_case , negative_prompt_embeds=snake_case , image=snake_case , generator=snake_case , num_inference_steps=2 , output_type='np' , )
_UpperCAmelCase = output.images[0]
assert image.shape == (256, 256, 3)
_UpperCAmelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_UpperCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' )
assert_mean_pixel_difference(snake_case , snake_case )
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case ) -> List[Any]:
# pipeline 1
_start_torch_memory_measurement()
_UpperCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(snake_case )
_UpperCAmelCase = torch.Generator(device='cpu' ).manual_seed(0 )
_UpperCAmelCase = pipe_a(
prompt_embeds=snake_case , negative_prompt_embeds=snake_case , image=snake_case , num_inference_steps=2 , generator=snake_case , output_type='np' , )
_UpperCAmelCase = output.images[0]
assert image.shape == (64, 64, 3)
_UpperCAmelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_UpperCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' )
assert_mean_pixel_difference(snake_case , snake_case )
# pipeline 2
_start_torch_memory_measurement()
_UpperCAmelCase = torch.Generator(device='cpu' ).manual_seed(0 )
_UpperCAmelCase = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(snake_case )
_UpperCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(snake_case )
_UpperCAmelCase = pipe_a(
prompt_embeds=snake_case , negative_prompt_embeds=snake_case , image=snake_case , original_image=snake_case , generator=snake_case , num_inference_steps=2 , output_type='np' , )
_UpperCAmelCase = output.images[0]
assert image.shape == (256, 256, 3)
_UpperCAmelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_UpperCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' )
assert_mean_pixel_difference(snake_case , snake_case )
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case ) -> Optional[Any]:
# pipeline 1
_start_torch_memory_measurement()
_UpperCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(snake_case )
_UpperCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(snake_case )
_UpperCAmelCase = torch.Generator(device='cpu' ).manual_seed(0 )
_UpperCAmelCase = pipe_a(
prompt_embeds=snake_case , negative_prompt_embeds=snake_case , image=snake_case , mask_image=snake_case , num_inference_steps=2 , generator=snake_case , output_type='np' , )
_UpperCAmelCase = output.images[0]
assert image.shape == (64, 64, 3)
_UpperCAmelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_UpperCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' )
assert_mean_pixel_difference(snake_case , snake_case )
# pipeline 2
_start_torch_memory_measurement()
_UpperCAmelCase = torch.Generator(device='cpu' ).manual_seed(0 )
_UpperCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(snake_case )
_UpperCAmelCase = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(snake_case )
_UpperCAmelCase = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(snake_case )
_UpperCAmelCase = pipe_a(
prompt_embeds=snake_case , negative_prompt_embeds=snake_case , image=snake_case , mask_image=snake_case , original_image=snake_case , generator=snake_case , num_inference_steps=2 , output_type='np' , )
_UpperCAmelCase = output.images[0]
assert image.shape == (256, 256, 3)
_UpperCAmelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_UpperCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' )
assert_mean_pixel_difference(snake_case , snake_case )
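# helper used by the tests above: emptying the cache and resetting the peak-memory
# counters makes each later torch.cuda.max_memory_allocated() read cover a single pipeline run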
def UpperCAmelCase ( ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 710 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowercase__ ( A, unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = DiTPipeline
_UpperCAmelCase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
_UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
'''latents''',
'''num_images_per_prompt''',
'''callback''',
'''callback_steps''',
}
_UpperCAmelCase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
_UpperCAmelCase = False
def lowerCamelCase_ ( self ) -> str:
torch.manual_seed(0 )
_UpperCAmelCase = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=snake_case , activation_fn='gelu-approximate' , num_embeds_ada_norm=1000 , norm_type='ada_norm_zero' , norm_elementwise_affine=snake_case , )
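# sample_size=16 with patch_size=4 gives a 4x4 grid of patch tokens, keeping the
# transformer tiny; the pipeline test below asserts matching (1, 16, 16, 3) outputs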
_UpperCAmelCase = AutoencoderKL()
_UpperCAmelCase = DDIMScheduler()
_UpperCAmelCase = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
return components
def lowerCamelCase_ ( self , snake_case , snake_case=0 ) -> Optional[Any]:
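# torch.Generator(device='mps') is not reliably supported, so fall back to seeding the global RNG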
if str(snake_case ).startswith('mps' ):
_UpperCAmelCase = torch.manual_seed(snake_case )
else:
_UpperCAmelCase = torch.Generator(device=snake_case ).manual_seed(snake_case )
_UpperCAmelCase = {
'class_labels': [1],
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def lowerCamelCase_ ( self ) -> List[Any]:
_UpperCAmelCase = 'cpu'
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = self.pipeline_class(**snake_case )
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
_UpperCAmelCase = self.get_dummy_inputs(snake_case )
_UpperCAmelCase = pipe(**snake_case ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_UpperCAmelCase = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
_UpperCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(snake_case , 1E-3 )
def lowerCamelCase_ ( self ) -> Any:
self._test_inference_batch_single_identical(relax_max_difference=snake_case , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def lowerCamelCase_ ( self ) -> Optional[int]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase_ ( self ) -> int:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
pipe.to('cuda' )
_UpperCAmelCase = ['vase', 'umbrella', 'white shark', 'white wolf']
_UpperCAmelCase = pipe.get_label_ids(snake_case )
_UpperCAmelCase = pipe(snake_case , generator=snake_case , num_inference_steps=40 , output_type='np' ).images
for word, image in zip(snake_case , snake_case ):
_UpperCAmelCase = load_numpy(
f'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy' )
assert np.abs((expected_image - image).max() ) < 1E-2
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
_UpperCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('cuda' )
_UpperCAmelCase = ['vase', 'umbrella']
_UpperCAmelCase = pipe.get_label_ids(snake_case )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = pipe(snake_case , generator=snake_case , num_inference_steps=25 , output_type='np' ).images
for word, image in zip(snake_case , snake_case ):
_UpperCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
f'/dit/{word}_512.npy' )
assert np.abs((expected_image - image).max() ) < 1E-1
| 24 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowercase = logging.get_logger(__name__)
lowercase = {
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class lowercase__ ( A, A ):
'''simple docstring'''
_UpperCAmelCase = '''resnet'''
_UpperCAmelCase = ['''basic''', '''bottleneck''']
def __init__( self , snake_case=3 , snake_case=64 , snake_case=[256, 512, 1024, 2048] , snake_case=[3, 4, 6, 3] , snake_case="bottleneck" , snake_case="relu" , snake_case=False , snake_case=None , snake_case=None , **snake_case , ) -> Optional[int]:
super().__init__(**snake_case )
if layer_type not in self.layer_types:
raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types )}' )
_UpperCAmelCase = num_channels
_UpperCAmelCase = embedding_size
_UpperCAmelCase = hidden_sizes
_UpperCAmelCase = depths
_UpperCAmelCase = layer_type
_UpperCAmelCase = hidden_act
_UpperCAmelCase = downsample_in_first_stage
_UpperCAmelCase = ['stem'] + [f'stage{idx}' for idx in range(1 , len(snake_case ) + 1 )]
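# reconcile the user-supplied out_features / out_indices against the stage names,
# deriving whichever of the two was omitted and validating their consistency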
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(
out_features=snake_case , out_indices=snake_case , stage_names=self.stage_names )
class lowercase__ ( A ):
'''simple docstring'''
_UpperCAmelCase = version.parse('''1.11''' )
@property
def lowerCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def lowerCamelCase_ ( self ) -> float:
return 1E-3
| 711 |
"""simple docstring"""
def UpperCAmelCase ( A : int ):
'''simple docstring'''
_UpperCAmelCase = abs(A )
_UpperCAmelCase = 0
while n > 0:
res += n % 10
n //= 10
return res
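# e.g. 1234 -> 4 + 3 + 2 + 1 = 10; abs() above makes negative inputs equivalent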
def UpperCAmelCase ( A : int ):
'''simple docstring'''
_UpperCAmelCase = abs(A )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def UpperCAmelCase ( A : int ):
'''simple docstring'''
return sum(int(A ) for c in str(abs(A ) ) )
def UpperCAmelCase ( ):
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(A : Callable , A : int ) -> None:
_UpperCAmelCase = f'{func.__name__}({value})'
_UpperCAmelCase = timeit(f'__main__.{call}' , setup='import __main__' )
print(f'{call:56} = {func(A )} -- {timing:.4f} seconds' )
for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(A , A )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 24 | 0 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 712 |
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase ( A : int , A : int ):
'''simple docstring'''
_UpperCAmelCase = []
create_all_state(1 , A , A , [] , A )
return result
def UpperCAmelCase ( A : int , A : int , A : int , A : list[int] , A : list[list[int]] , ):
'''simple docstring'''
if level == 0:
total_list.append(current_list[:] )
return
for i in range(A , total_number - level + 2 ):
current_list.append(A )
create_all_state(i + 1 , A , level - 1 , A , A )
current_list.pop()
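# e.g. generate_all_combinations(4, 2) yields [1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]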
def UpperCAmelCase ( A : list[list[int]] ):
'''simple docstring'''
for i in total_list:
print(*A )
if __name__ == "__main__":
lowercase = 4
lowercase = 2
lowercase = generate_all_combinations(n, k)
print_all_state(total_list)
| 24 | 0 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
], # cumulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
], # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
_UpperCAmelCase = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non-filtered indices as noted above
_UpperCAmelCase = tf.convert_to_tensor(
[8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023] , dtype=tf.floataa , ) # expected non-filtered values as noted above
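# top_k=10 followed by top_p=0.6 should keep exactly the five annotated logits per row
# (min_tokens_to_keep=4 is only a floor and is not binding here)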
_UpperCAmelCase = tf_top_k_top_p_filtering(snake_case , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
_UpperCAmelCase = output[output != -float('inf' )]
_UpperCAmelCase = tf.cast(
tf.where(tf.not_equal(snake_case , tf.constant(-float('inf' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(snake_case , snake_case , rtol=1E-12 )
tf.debugging.assert_equal(snake_case , snake_case )
@require_tf
class lowercase__ ( unittest.TestCase, A ):
'''simple docstring'''
if is_tf_available():
_UpperCAmelCase = {
'''AutoModelForCausalLM''': TFAutoModelForCausalLM,
'''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq,
'''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM,
'''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq,
'''LogitsProcessorList''': TFLogitsProcessorList,
'''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor,
'''create_tensor_fn''': tf.convert_to_tensor,
'''floats_tensor''': floats_tensor,
'''return_tensors''': '''tf''',
}
@slow
def lowerCamelCase_ ( self ) -> List[str]:
# TF-only test: tf.saved_model export
_UpperCAmelCase = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
_UpperCAmelCase = 2
_UpperCAmelCase = 2
class lowercase__ ( tf.Module ):
'''simple docstring'''
def __init__( self , snake_case ) -> Optional[Any]:
super(snake_case , self ).__init__()
_UpperCAmelCase = model
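# the signature fixes the sequence length but leaves the batch dimension dynamic (None),
# so the exported serving function accepts any batch size -- exercised in the loop below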
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name='input_ids' ),
tf.TensorSpec((None, input_length) , tf.intaa , name='attention_mask' ),
) , jit_compile=snake_case , )
def lowerCamelCase_ ( self , snake_case , snake_case ) -> Optional[Any]:
_UpperCAmelCase = self.model.generate(
input_ids=snake_case , attention_mask=snake_case , max_new_tokens=snake_case , return_dict_in_generate=snake_case , )
return {"sequences": outputs["sequences"]}
_UpperCAmelCase = [[2, 0], [102, 103]]
_UpperCAmelCase = [[1, 0], [1, 1]]
_UpperCAmelCase = DummyModel(model=snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(snake_case , snake_case , signatures={'serving_default': dummy_model.serving} )
_UpperCAmelCase = tf.saved_model.load(snake_case ).signatures['serving_default']
for batch_size in range(1 , len(snake_case ) + 1 ):
_UpperCAmelCase = {
'input_ids': tf.constant(dummy_input_ids[:batch_size] ),
'attention_mask': tf.constant(dummy_attention_masks[:batch_size] ),
}
_UpperCAmelCase = serving_func(**snake_case )['sequences']
_UpperCAmelCase = test_model.generate(**snake_case , max_new_tokens=snake_case )
tf.debugging.assert_equal(snake_case , snake_case )
@slow
def lowerCamelCase_ ( self ) -> int:
# TF-only test: tf.saved_model export
_UpperCAmelCase = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
_UpperCAmelCase = 1
_UpperCAmelCase = 2
class lowercase__ ( tf.Module ):
'''simple docstring'''
def __init__( self , snake_case ) -> str:
super(snake_case , self ).__init__()
_UpperCAmelCase = model
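# mirror case of the previous test: here the batch dimension is pinned and the
# sequence length is left dynamic (None), so rows of different lengths can be served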
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name='input_ids' ),
tf.TensorSpec((batch_size, None) , tf.intaa , name='attention_mask' ),
) , jit_compile=snake_case , )
def lowerCamelCase_ ( self , snake_case , snake_case ) -> int:
_UpperCAmelCase = self.model.generate(
input_ids=snake_case , attention_mask=snake_case , max_new_tokens=snake_case , return_dict_in_generate=snake_case , )
return {"sequences": outputs["sequences"]}
_UpperCAmelCase = [[2], [102, 103]]
_UpperCAmelCase = [[1], [1, 1]]
_UpperCAmelCase = DummyModel(model=snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(snake_case , snake_case , signatures={'serving_default': dummy_model.serving} )
_UpperCAmelCase = tf.saved_model.load(snake_case ).signatures['serving_default']
for input_row in range(len(snake_case ) ):
_UpperCAmelCase = {
'input_ids': tf.constant([dummy_input_ids[input_row]] ),
'attention_mask': tf.constant([dummy_attention_masks[input_row]] ),
}
_UpperCAmelCase = serving_func(**snake_case )['sequences']
_UpperCAmelCase = test_model.generate(**snake_case , max_new_tokens=snake_case )
tf.debugging.assert_equal(snake_case , snake_case )
@slow
@require_tensorflow_text
def lowerCamelCase_ ( self ) -> Any:
# TF-only test: tf.saved_model export
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id='google/flan-t5-small' , filename='spiece.model' , local_dir=snake_case )
class lowercase__ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self ) -> Dict:
super().__init__()
_UpperCAmelCase = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(snake_case , 'spiece.model' ) , 'rb' ).read() )
_UpperCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained('hf-internal-testing/tiny-random-t5' )
def lowerCamelCase_ ( self , snake_case , *snake_case , **snake_case ) -> str:
_UpperCAmelCase = self.tokenizer.tokenize(snake_case )
_UpperCAmelCase , _UpperCAmelCase = text.pad_model_inputs(
snake_case , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
_UpperCAmelCase = self.model.generate(input_ids=snake_case , attention_mask=snake_case )
return self.tokenizer.detokenize(snake_case )
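# wrap the layer in a functional Keras model so tokenization, generation and
# detokenization can all be saved (and later reloaded) as a single artifact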
_UpperCAmelCase = CompleteSentenceTransformer()
_UpperCAmelCase = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='inputs' )
_UpperCAmelCase = complete_model(snake_case )
_UpperCAmelCase = tf.keras.Model(snake_case , snake_case )
keras_model.save(snake_case )
def lowerCamelCase_ ( self ) -> List[Any]:
# Has PT equivalent: this test relies on random sampling
_UpperCAmelCase = {
'do_sample': True,
'num_beams': 1,
'top_p': 0.7,
'top_k': 10,
'temperature': 0.7,
}
_UpperCAmelCase = 14
_UpperCAmelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
_UpperCAmelCase = 'Hello, my dog is cute and'
_UpperCAmelCase = tokenizer(snake_case , return_tensors='tf' )
_UpperCAmelCase = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
_UpperCAmelCase = 638
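# with seed 0 on CPU, token 638 happens to be sampled at just the right step for the
# sequence to reach length `expectation` once it is treated as the EOS token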
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(':/CPU:0' ):
tf.random.set_seed(0 )
_UpperCAmelCase = model.generate(**snake_case , eos_token_id=snake_case , **snake_case )
self.assertTrue(expectation == len(generated_tokens[0] ) )
_UpperCAmelCase = [638, 198]
with tf.device(':/CPU:0' ):
tf.random.set_seed(0 )
_UpperCAmelCase = model.generate(**snake_case , eos_token_id=snake_case , **snake_case )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def lowerCamelCase_ ( self ) -> Dict:
# Has PT equivalent: ample use of framework-specific code
_UpperCAmelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bart' )
_UpperCAmelCase = 'Hugging Face is a technology company based in New York and Paris.'
_UpperCAmelCase = bart_tokenizer(snake_case , return_tensors='tf' ).input_ids
_UpperCAmelCase = TFBartForConditionalGeneration.from_pretrained('hf-internal-testing/tiny-random-bart' )
_UpperCAmelCase = bart_model.generate(snake_case ).numpy()
class lowercase__ ( A ):
'''simple docstring'''
def lowerCamelCase_ ( self , snake_case , snake_case=None , **snake_case ) -> str:
return super().call(snake_case , **snake_case )
_UpperCAmelCase = FakeBart.from_pretrained('hf-internal-testing/tiny-random-bart' )
_UpperCAmelCase = bart_model.generate(snake_case , foo='bar' ).numpy()
self.assertTrue(np.array_equal(snake_case , snake_case ) )
class lowercase__ ( bart_model.model.encoder.__class__ ):
'''simple docstring'''
def lowerCamelCase_ ( self , snake_case , **snake_case ) -> int:
return super().call(snake_case , **snake_case )
_UpperCAmelCase = FakeEncoder(bart_model.config , bart_model.model.shared )
_UpperCAmelCase = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
_UpperCAmelCase = bart_model.generate(snake_case ).numpy()
with self.assertRaises(snake_case ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(snake_case , foo='bar' )
| 713 |
"""simple docstring"""
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
lowercase = logging.getLogger()
def UpperCAmelCase ( A : Path , A : list ):
'''simple docstring'''
_UpperCAmelCase = '\n'.join(A )
Path(A ).open('w' ).writelines(A )
lowercase = '''patrickvonplaten/t5-tiny-random'''
lowercase = '''sshleifer/bart-tiny-random'''
lowercase = '''sshleifer/tiny-mbart'''
lowercase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class lowercase__ ( A ):
'''simple docstring'''
def lowerCamelCase_ ( self , snake_case ) -> str:
_UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
_UpperCAmelCase = input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
_UpperCAmelCase = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.']
_dump_articles(snake_case , snake_case )
_UpperCAmelCase = str(Path(self.get_auto_remove_tmp_dir() ) / 'scores.json' )
_UpperCAmelCase = 'translation_en_to_de' if model == T5_TINY else 'summarization'
_UpperCAmelCase = f'\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n '.split()
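# the multi-line f-string is split() into an argv list and patched over sys.argv,
# so run_generate() parses it exactly like a real CLI invocation (argv[0] is ignored)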
with patch.object(snake_case , 'argv' , snake_case ):
run_generate()
assert Path(snake_case ).exists()
# os.remove(Path(output_file_name))
def lowerCamelCase_ ( self ) -> str:
self.run_eval_tester(snake_case )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def lowerCamelCase_ ( self , snake_case ) -> List[Any]:
self.run_eval_tester(snake_case )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def lowerCamelCase_ ( self , snake_case ) -> Dict:
_UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
_UpperCAmelCase = input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
_UpperCAmelCase = {
'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'],
'de': [
'Maschinelles Lernen ist großartig, oder?',
'Ich esse gerne Bananen',
'Morgen ist wieder ein toller Tag!',
],
}
_UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() )
_UpperCAmelCase = str(tmp_dir / 'scores.json' )
_UpperCAmelCase = str(tmp_dir / 'val.target' )
_dump_articles(snake_case , text['en'] )
_dump_articles(snake_case , text['de'] )
_UpperCAmelCase = 'translation_en_to_de' if model == T5_TINY else 'summarization'
_UpperCAmelCase = f'\n run_eval_search.py\n {model}\n {str(snake_case )}\n {str(snake_case )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n '.split()
testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'] )
with patch.object(snake_case , 'argv' , snake_case ):
with CaptureStdout() as cs:
run_search()
_UpperCAmelCase = [' num_beams | length_penalty', model, 'Best score args']
_UpperCAmelCase = ['Info']
if "translation" in task:
expected_strings.append('bleu' )
else:
expected_strings.extend(snake_case )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(snake_case ).exists()
os.remove(Path(snake_case ) )
| 24 | 0 |
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def UpperCAmelCase ( A : Dict ):
_UpperCAmelCase = filter(lambda A : A.requires_grad , model.parameters() )
_UpperCAmelCase = sum([np.prod(p.size() ) for p in model_parameters] )
return params
lowercase = logging.getLogger(__name__)
def UpperCAmelCase ( A : str , A : int ):
if metric == "rouge2":
_UpperCAmelCase = '{val_avg_rouge2:.4f}-{step_count}'
elif metric == "bleu":
_UpperCAmelCase = '{val_avg_bleu:.4f}-{step_count}'
elif metric == "em":
_UpperCAmelCase = '{val_avg_em:.4f}-{step_count}'
elif metric == "loss":
_UpperCAmelCase = '{val_avg_loss:.4f}-{step_count}'
else:
raise NotImplementedError(
f'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'
' function.' )
_UpperCAmelCase = ModelCheckpoint(
dirpath=A , filename=A , monitor=f'val_{metric}' , mode='max' , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
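# note the asymmetry with the checkpoint factory above, which always uses mode='max';
# in the early-stopping factory below, 'loss' metrics are minimized and all others maximized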
def UpperCAmelCase ( A : Union[str, Any] , A : Optional[int] ):
return EarlyStopping(
monitor=f'val_{metric}' , mode='min' if 'loss' in metric else 'max' , patience=A , verbose=A , )
class lowercase__ ( pl.Callback ):
'''simple docstring'''
def lowerCamelCase_ ( self , snake_case , snake_case ) -> Optional[int]:
_UpperCAmelCase = {f'lr_group_{i}': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(snake_case )
@rank_zero_only
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case=True ) -> None:
logger.info(f'***** {type_path} results at step {trainer.global_step:05d} *****' )
_UpperCAmelCase = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} )
# Log results
_UpperCAmelCase = Path(pl_module.hparams.output_dir )
if type_path == "test":
_UpperCAmelCase = od / 'test_results.txt'
_UpperCAmelCase = od / 'test_generations.txt'
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
_UpperCAmelCase = od / f'{type_path}_results/{trainer.global_step:05d}.txt'
_UpperCAmelCase = od / f'{type_path}_generations/{trainer.global_step:05d}.txt'
results_file.parent.mkdir(exist_ok=snake_case )
generations_file.parent.mkdir(exist_ok=snake_case )
with open(snake_case , 'a+' ) as writer:
for key in sorted(snake_case ):
if key in ["log", "progress_bar", "preds"]:
continue
_UpperCAmelCase = metrics[key]
if isinstance(snake_case , torch.Tensor ):
_UpperCAmelCase = val.item()
_UpperCAmelCase = f'{key}: {val:.6f}\n'
writer.write(snake_case )
if not save_generations:
return
if "preds" in metrics:
_UpperCAmelCase = '\n'.join(metrics['preds'] )
generations_file.open('w+' ).write(snake_case )
@rank_zero_only
def lowerCamelCase_ ( self , snake_case , snake_case ) -> List[Any]:
try:
_UpperCAmelCase = pl_module.model.model.num_parameters()
except AttributeError:
_UpperCAmelCase = pl_module.model.num_parameters()
_UpperCAmelCase = count_trainable_parameters(snake_case )
# mp stands for million parameters
trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6} )
@rank_zero_only
def lowerCamelCase_ ( self , snake_case , snake_case ) -> int:
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(snake_case , snake_case , 'test' )
@rank_zero_only
def lowerCamelCase_ ( self , snake_case , snake_case ) -> List[Any]:
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 714 |
"""simple docstring"""
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
lowercase = logging.get_logger(__name__)
lowercase = TypeVar('''DatasetType''', Dataset, IterableDataset)
def UpperCAmelCase ( A : List[DatasetType] , A : Optional[List[float]] = None , A : Optional[int] = None , A : Optional[DatasetInfo] = None , A : Optional[NamedSplit] = None , A : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ):
'''simple docstring'''
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('Unable to interleave an empty list of datasets.' )
for i, dataset in enumerate(A ):
if not isinstance(A , (Dataset, IterableDataset) ):
if isinstance(A , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
'is an empty dataset dictionary.' )
raise ValueError(
f'Dataset at position {i} has at least one split: {list(A )}\n'
f'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(A ) )}\']' )
raise ValueError(
f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.' )
if i == 0:
_UpperCAmelCase , _UpperCAmelCase = (
(Dataset, IterableDataset) if isinstance(A , A ) else (IterableDataset, Dataset)
)
elif not isinstance(A , A ):
raise ValueError(
f'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(f'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' )
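# 'first_exhausted' stops as soon as any dataset runs out of samples (undersampling);
# 'all_exhausted' keeps cycling until every dataset has been fully seen (oversampling)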
if dataset_type is Dataset:
return _interleave_map_style_datasets(
A , A , A , info=A , split=A , stopping_strategy=A )
else:
return _interleave_iterable_datasets(
A , A , A , info=A , split=A , stopping_strategy=A )
def UpperCAmelCase ( A : List[DatasetType] , A : Optional[DatasetInfo] = None , A : Optional[NamedSplit] = None , A : int = 0 , ):
'''simple docstring'''
if not dsets:
raise ValueError('Unable to concatenate an empty list of datasets.' )
for i, dataset in enumerate(A ):
if not isinstance(A , (Dataset, IterableDataset) ):
if isinstance(A , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
'is an empty dataset dictionary.' )
raise ValueError(
f'Dataset at position {i} has at least one split: {list(A )}\n'
f'Please pick one to concatenate with the other datasets, for example: dataset[\'{next(iter(A ) )}\']' )
raise ValueError(
f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.' )
if i == 0:
_UpperCAmelCase , _UpperCAmelCase = (
(Dataset, IterableDataset) if isinstance(A , A ) else (IterableDataset, Dataset)
)
elif not isinstance(A , A ):
raise ValueError(
f'Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(A , info=A , split=A , axis=A )
else:
return _concatenate_iterable_datasets(A , info=A , split=A , axis=A )
| 24 | 0 |
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def UpperCAmelCase ( A : Tuple , A : List[str]=False ):
'''simple docstring'''
try:
_UpperCAmelCase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_UpperCAmelCase = default
else:
# KEY is set, convert it to True or False.
try:
_UpperCAmelCase = strtobool(A )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'If set, {key} must be yes or no.' )
return _value
lowercase = parse_flag_from_env('''RUN_SLOW''', default=False)
lowercase = parse_flag_from_env('''RUN_REMOTE''', default=False)
lowercase = parse_flag_from_env('''RUN_LOCAL''', default=True)
lowercase = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
lowercase = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
lowercase = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
lowercase = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
lowercase = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
lowercase = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
lowercase = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
lowercase = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def UpperCAmelCase ( A : Optional[Any] ):
'''simple docstring'''
try:
import faiss # noqa
except ImportError:
_UpperCAmelCase = unittest.skip('test requires faiss' )(A )
return test_case
def UpperCAmelCase ( A : List[Any] ):
'''simple docstring'''
try:
import regex # noqa
except ImportError:
_UpperCAmelCase = unittest.skip('test requires regex' )(A )
return test_case
def UpperCAmelCase ( A : List[str] ):
'''simple docstring'''
try:
import elasticsearch # noqa
except ImportError:
_UpperCAmelCase = unittest.skip('test requires elasticsearch' )(A )
return test_case
def UpperCAmelCase ( A : Optional[int] ):
'''simple docstring'''
try:
import sqlalchemy # noqa
except ImportError:
_UpperCAmelCase = unittest.skip('test requires sqlalchemy' )(A )
return test_case
def UpperCAmelCase ( A : str ):
'''simple docstring'''
if not config.TORCH_AVAILABLE:
_UpperCAmelCase = unittest.skip('test requires PyTorch' )(A )
return test_case
def UpperCAmelCase ( A : Tuple ):
'''simple docstring'''
if not config.TF_AVAILABLE:
_UpperCAmelCase = unittest.skip('test requires TensorFlow' )(A )
return test_case
def UpperCAmelCase ( A : Optional[Any] ):
'''simple docstring'''
if not config.JAX_AVAILABLE:
_UpperCAmelCase = unittest.skip('test requires JAX' )(A )
return test_case
def UpperCAmelCase ( A : str ):
'''simple docstring'''
if not config.PIL_AVAILABLE:
_UpperCAmelCase = unittest.skip('test requires Pillow' )(A )
return test_case
def UpperCAmelCase ( A : Union[str, Any] ):
'''simple docstring'''
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('test requires transformers' )(A )
else:
return test_case
def UpperCAmelCase ( A : Optional[int] ):
'''simple docstring'''
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('test requires tiktoken' )(A )
else:
return test_case
def UpperCAmelCase ( A : Any ):
'''simple docstring'''
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('test requires spacy' )(A )
else:
return test_case
def UpperCAmelCase ( A : Any ):
'''simple docstring'''
def _require_spacy_model(A : Dict ):
try:
import spacy # noqa F401
spacy.load(A )
except ImportError:
return unittest.skip('test requires spacy' )(A )
except OSError:
return unittest.skip('test requires spacy model \'{}\''.format(A ) )(A )
else:
return test_case
return _require_spacy_model
def UpperCAmelCase ( A : str ):
'''simple docstring'''
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('test requires pyspark' )(A )
else:
return test_case
def UpperCAmelCase ( A : Optional[int] ):
'''simple docstring'''
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('test requires joblibspark' )(A )
else:
return test_case
def UpperCAmelCase ( A : List[str] ):
'''simple docstring'''
if not _run_slow_tests or _run_slow_tests == 0:
_UpperCAmelCase = unittest.skip('test is slow' )(A )
return test_case
def UpperCAmelCase ( A : List[Any] ):
'''simple docstring'''
if not _run_local_tests or _run_local_tests == 0:
_UpperCAmelCase = unittest.skip('test is local' )(A )
return test_case
def UpperCAmelCase ( A : Dict ):
'''simple docstring'''
if not _run_packaged_tests or _run_packaged_tests == 0:
_UpperCAmelCase = unittest.skip('test is packaged' )(A )
return test_case
def UpperCAmelCase ( A : List[Any] ):
'''simple docstring'''
if not _run_remote_tests or _run_remote_tests == 0:
_UpperCAmelCase = unittest.skip('test requires remote' )(A )
return test_case
def UpperCAmelCase ( *A : List[Any] ):
'''simple docstring'''
def decorate(cls : Tuple ):
for name, fn in cls.__dict__.items():
if callable(A ) and name.startswith('test' ):
for decorator in decorators:
_UpperCAmelCase = decorator(A )
setattr(cls , A , A )
return cls
return decorate
class lowercase__ ( A ):
'''simple docstring'''
pass
class lowercase__ ( A ):
'''simple docstring'''
_UpperCAmelCase = 0
_UpperCAmelCase = 1
_UpperCAmelCase = 2
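# 0 = CONNECTION_FAILS, 1 = CONNECTION_TIMES_OUT, 2 = HF_DATASETS_OFFLINE_SET_TO_1
# (matching the three branches handled in the context manager below)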
@contextmanager
def UpperCAmelCase ( A : Optional[Any]=OfflineSimulationMode.CONNECTION_FAILS , A : Optional[int]=1e-16 ):
'''simple docstring'''
_UpperCAmelCase = requests.Session().request
def timeout_request(A : str , A : Tuple , A : List[Any] , **A : List[str] ):
# Change the url to an invalid url so that the connection hangs
_UpperCAmelCase = 'https://10.255.255.1'
if kwargs.get('timeout' ) is None:
raise RequestWouldHangIndefinitelyError(
f'Tried a call to {url} in offline mode with no timeout set. Please set a timeout.' )
_UpperCAmelCase = timeout
try:
return online_request(A , A , **A )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
_UpperCAmelCase = url
_UpperCAmelCase = e.args[0]
_UpperCAmelCase = (max_retry_error.args[0].replace('10.255.255.1' , f'OfflineMock[{url}]' ),)
_UpperCAmelCase = (max_retry_error,)
raise
def raise_connection_error(A : Dict , A : int , **A : Optional[int] ):
raise requests.ConnectionError('Offline mode is enabled.' , request=A )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('requests.Session.send' , A ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch('requests.Session.request' , A ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('datasets.config.HF_DATASETS_OFFLINE' , A ):
yield
else:
raise ValueError('Please use a value from the OfflineSimulationMode enum.' )
@contextmanager
def UpperCAmelCase ( *A : List[str] , **A : Optional[Any] ):
'''simple docstring'''
_UpperCAmelCase = str(Path().resolve() )
with tempfile.TemporaryDirectory(*A , **A ) as tmp_dir:
try:
os.chdir(A )
yield
finally:
os.chdir(A )
@contextmanager
def UpperCAmelCase ( ):
'''simple docstring'''
import gc
gc.collect()
_UpperCAmelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def UpperCAmelCase ( ):
'''simple docstring'''
import gc
gc.collect()
_UpperCAmelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def UpperCAmelCase ( A : Optional[Any] , A : str ):
'''simple docstring'''
return deepcopy(A ).integers(0 , 100 , 10 ).tolist() == deepcopy(A ).integers(0 , 100 , 10 ).tolist()
def UpperCAmelCase ( A : int ):
'''simple docstring'''
import decorator
from requests.exceptions import HTTPError
def _wrapper(A : str , *A : Dict , **A : List[str] ):
try:
return func(*A , **A )
except HTTPError as err:
if str(A ).startswith('500' ) or str(A ).startswith('502' ):
pytest.xfail(str(A ) )
raise err
return decorator.decorator(_wrapper , A )
class lowercase__ :
'''simple docstring'''
def __init__( self , snake_case , snake_case , snake_case ) -> Optional[Any]:
_UpperCAmelCase = returncode
_UpperCAmelCase = stdout
_UpperCAmelCase = stderr
async def UpperCAmelCase ( A : List[str] , A : int ):
'''simple docstring'''
while True:
_UpperCAmelCase = await stream.readline()
if line:
callback(A )
else:
break
async def UpperCAmelCase ( A : str , A : Tuple=None , A : List[str]=None , A : Dict=None , A : int=False , A : Dict=False ):
'''simple docstring'''
if echo:
print('\nRunning: ' , ' '.join(A ) )
_UpperCAmelCase = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=A , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=A , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
_UpperCAmelCase = []
_UpperCAmelCase = []
def tee(A : Optional[int] , A : Optional[Any] , A : Optional[int] , A : Optional[Any]="" ):
_UpperCAmelCase = line.decode('utf-8' ).rstrip()
sink.append(A )
if not quiet:
print(A , A , file=A )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda A : tee(A , A , sys.stdout , label='stdout:' ) ),
_read_stream(p.stderr , lambda A : tee(A , A , sys.stderr , label='stderr:' ) ),
] , timeout=A , )
return _RunOutput(await p.wait() , A , A )
def UpperCAmelCase ( A : Tuple , A : Any=None , A : Dict=None , A : Optional[int]=180 , A : str=False , A : Dict=True ):
'''simple docstring'''
_UpperCAmelCase = asyncio.get_event_loop()
_UpperCAmelCase = loop.run_until_complete(
_stream_subprocess(A , env=A , stdin=A , timeout=A , quiet=A , echo=A ) )
_UpperCAmelCase = ' '.join(A )
if result.returncode > 0:
_UpperCAmelCase = '\n'.join(result.stderr )
raise RuntimeError(
f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
f'The combined stderr from workers follows:\n{stderr}' )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f'\'{cmd_str}\' produced no output.' )
return result
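# pytest-xdist names its workers gw0, gw1, ...; stripping the prefix yields a unique
# integer per worker, which the helper below adds to a base port to avoid collisions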
def UpperCAmelCase ( ):
'''simple docstring'''
_UpperCAmelCase = os.environ.get('PYTEST_XDIST_WORKER' , 'gw0' )
_UpperCAmelCase = re.sub(r'^gw' , '' , A , 0 , re.M )
return int(A )
def UpperCAmelCase ( ):
'''simple docstring'''
_UpperCAmelCase = 2_9500
_UpperCAmelCase = pytest_xdist_worker_id()
return port + uniq_delta
| 715 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_UpperCAmelCase = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Dict:
_UpperCAmelCase = TextaTextGenerationPipeline(model=snake_case , tokenizer=snake_case )
return generator, ["Something to write", "Something else"]
def lowerCamelCase_ ( self , snake_case , snake_case ) -> Dict:
_UpperCAmelCase = generator('Something there' )
self.assertEqual(snake_case , [{'generated_text': ANY(snake_case )}] )
# These are encoder-decoder models; they don't just append to the incoming string
self.assertFalse(outputs[0]['generated_text'].startswith('Something there' ) )
_UpperCAmelCase = generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=snake_case )
self.assertEqual(
snake_case , [
[{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}],
[{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}],
] , )
_UpperCAmelCase = generator(
['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=snake_case )
self.assertEqual(
snake_case , [
[{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}],
[{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}],
] , )
with self.assertRaises(snake_case ):
generator(4 )
@require_torch
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='pt' )
# do_sample=False necessary for reproducibility
_UpperCAmelCase = generator('Something there' , do_sample=snake_case )
self.assertEqual(snake_case , [{'generated_text': ''}] )
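# the tiny random checkpoint happens to emit EOS immediately under greedy decoding
# (do_sample=False), hence the empty generated_text asserted above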
_UpperCAmelCase = 3
_UpperCAmelCase = generator(
'Something there' , num_return_sequences=snake_case , num_beams=snake_case , )
_UpperCAmelCase = [
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': ''},
]
self.assertEqual(snake_case , snake_case )
_UpperCAmelCase = generator('This is a test' , do_sample=snake_case , num_return_sequences=2 , return_tensors=snake_case )
self.assertEqual(
snake_case , [
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
] , )
_UpperCAmelCase = generator.model.config.eos_token_id
_UpperCAmelCase = '<pad>'
_UpperCAmelCase = generator(
['This is a test', 'This is a second test'] , do_sample=snake_case , num_return_sequences=2 , batch_size=2 , return_tensors=snake_case , )
self.assertEqual(
snake_case , [
[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
],
[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
],
] , )
@require_tf
def lowerCamelCase_ ( self ) -> Any:
_UpperCAmelCase = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='tf' )
# do_sample=False necessary for reproducibility
_UpperCAmelCase = generator('Something there' , do_sample=snake_case )
self.assertEqual(snake_case , [{'generated_text': ''}] )
| 24 | 0 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
lowercase = [
'''openmmlab/upernet-convnext-tiny''',
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
lowercase = '''UperNetConfig'''
class lowercase__ ( nn.Module ):
'''simple docstring'''
def __init__( self , snake_case , snake_case , snake_case , snake_case = 0 , snake_case = False , snake_case = 1 , ) -> None:
super().__init__()
_UpperCAmelCase = nn.Convad(
in_channels=snake_case , out_channels=snake_case , kernel_size=snake_case , padding=snake_case , bias=snake_case , dilation=snake_case , )
_UpperCAmelCase = nn.BatchNormad(snake_case )
_UpperCAmelCase = nn.ReLU()
def lowerCamelCase_ ( self , snake_case ) -> torch.Tensor:
_UpperCAmelCase = self.conv(snake_case )
_UpperCAmelCase = self.batch_norm(snake_case )
_UpperCAmelCase = self.activation(snake_case )
return output
class lowercase__ ( nn.Module ):
'''simple docstring'''
def __init__( self , snake_case , snake_case , snake_case ) -> None:
super().__init__()
_UpperCAmelCase = [
nn.AdaptiveAvgPoolad(snake_case ),
UperNetConvModule(snake_case , snake_case , kernel_size=1 ),
]
for i, layer in enumerate(self.layers ):
self.add_module(str(snake_case ) , snake_case )
def lowerCamelCase_ ( self , snake_case ) -> torch.Tensor:
_UpperCAmelCase = input
for layer in self.layers:
_UpperCAmelCase = layer(snake_case )
return hidden_state
class lowercase__ ( nn.Module ):
'''simple docstring'''
def __init__( self , snake_case , snake_case , snake_case , snake_case ) -> None:
super().__init__()
_UpperCAmelCase = pool_scales
_UpperCAmelCase = align_corners
_UpperCAmelCase = in_channels
_UpperCAmelCase = channels
_UpperCAmelCase = []
for i, pool_scale in enumerate(snake_case ):
_UpperCAmelCase = UperNetPyramidPoolingBlock(pool_scale=snake_case , in_channels=snake_case , channels=snake_case )
self.blocks.append(snake_case )
self.add_module(str(snake_case ) , snake_case )
def lowerCamelCase_ ( self , snake_case ) -> List[torch.Tensor]:
_UpperCAmelCase = []
for ppm in self.blocks:
_UpperCAmelCase = ppm(snake_case )
_UpperCAmelCase = nn.functional.interpolate(
snake_case , size=x.size()[2:] , mode='bilinear' , align_corners=self.align_corners )
ppm_outs.append(snake_case )
return ppm_outs
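# decode head: pyramid pooling (PSP) over the deepest feature map combined with a
# top-down FPN over the shallower maps, fused and classified with a 1x1 conv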
class lowercase__ ( nn.Module ):
'''simple docstring'''
def __init__( self , snake_case , snake_case ) -> List[str]:
super().__init__()
_UpperCAmelCase = config
_UpperCAmelCase = config.pool_scales # e.g. (1, 2, 3, 6)
_UpperCAmelCase = in_channels
_UpperCAmelCase = config.hidden_size
_UpperCAmelCase = False
_UpperCAmelCase = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
# PSP Module
_UpperCAmelCase = UperNetPyramidPoolingModule(
self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
_UpperCAmelCase = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
# FPN Module
_UpperCAmelCase = nn.ModuleList()
_UpperCAmelCase = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
_UpperCAmelCase = UperNetConvModule(snake_case , self.channels , kernel_size=1 )
_UpperCAmelCase = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
self.lateral_convs.append(snake_case )
self.fpn_convs.append(snake_case )
_UpperCAmelCase = UperNetConvModule(
len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
def lowerCamelCase_ ( self ) -> Optional[int]:
self.apply(self._init_weights )
def lowerCamelCase_ ( self , snake_case ) -> Tuple:
if isinstance(snake_case , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def lowerCamelCase_ ( self , snake_case ) -> Optional[int]:
_UpperCAmelCase = inputs[-1]
_UpperCAmelCase = [x]
psp_outs.extend(self.psp_modules(snake_case ) )
_UpperCAmelCase = torch.cat(snake_case , dim=1 )
_UpperCAmelCase = self.bottleneck(snake_case )
return output
def lowerCamelCase_ ( self , snake_case ) -> torch.Tensor:
# build laterals
_UpperCAmelCase = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(snake_case ) )
# build top-down path
_UpperCAmelCase = len(snake_case )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
_UpperCAmelCase = laterals[i - 1].shape[2:]
_UpperCAmelCase = laterals[i - 1] + nn.functional.interpolate(
laterals[i] , size=snake_case , mode='bilinear' , align_corners=self.align_corners )
# build outputs
_UpperCAmelCase = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
_UpperCAmelCase = nn.functional.interpolate(
fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='bilinear' , align_corners=self.align_corners )
_UpperCAmelCase = torch.cat(snake_case , dim=1 )
_UpperCAmelCase = self.fpn_bottleneck(snake_case )
_UpperCAmelCase = self.classifier(snake_case )
return output
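# auxiliary FCN head: a small conv stack applied to one intermediate feature map
# (selected by in_index); it only contributes an auxiliary loss during training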
class lowercase__ ( nn.Module ):
'''simple docstring'''
def __init__( self , snake_case , snake_case = 2 , snake_case = 3 , snake_case = 1 ) -> None:
super().__init__()
_UpperCAmelCase = config
_UpperCAmelCase = config.auxiliary_in_channels
_UpperCAmelCase = config.auxiliary_channels
_UpperCAmelCase = config.auxiliary_num_convs
_UpperCAmelCase = config.auxiliary_concat_input
_UpperCAmelCase = in_index
_UpperCAmelCase = (kernel_size // 2) * dilation
_UpperCAmelCase = []
convs.append(
UperNetConvModule(
self.in_channels , self.channels , kernel_size=snake_case , padding=snake_case , dilation=snake_case ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels , self.channels , kernel_size=snake_case , padding=snake_case , dilation=snake_case ) )
if self.num_convs == 0:
_UpperCAmelCase = nn.Identity()
else:
_UpperCAmelCase = nn.Sequential(*snake_case )
if self.concat_input:
_UpperCAmelCase = UperNetConvModule(
self.in_channels + self.channels , self.channels , kernel_size=snake_case , padding=kernel_size // 2 )
_UpperCAmelCase = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
def lowerCamelCase_ ( self ) -> Dict:
self.apply(self._init_weights )
def lowerCamelCase_ ( self , snake_case ) -> Union[str, Any]:
if isinstance(snake_case , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def lowerCamelCase_ ( self , snake_case ) -> torch.Tensor:
# just take the relevant feature maps
_UpperCAmelCase = encoder_hidden_states[self.in_index]
_UpperCAmelCase = self.convs(snake_case )
if self.concat_input:
_UpperCAmelCase = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
_UpperCAmelCase = self.classifier(snake_case )
return output
class lowercase__ ( A ):
'''simple docstring'''
_UpperCAmelCase = UperNetConfig
_UpperCAmelCase = '''pixel_values'''
_UpperCAmelCase = True
def lowerCamelCase_ ( self , snake_case ) -> List[str]:
if isinstance(snake_case , snake_case ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def lowerCamelCase_ ( self ) -> Optional[int]:
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def lowerCamelCase_ ( self , snake_case , snake_case=False ) -> Any:
if isinstance(snake_case , snake_case ):
_UpperCAmelCase = value
lowercase = r'''
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
lowercase = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
`attentions` under returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
returned tensors for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
'''UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.''', A, )
class lowercase__ ( A ):
'''simple docstring'''
def __init__( self , snake_case ) -> Dict:
super().__init__(snake_case )
_UpperCAmelCase = AutoBackbone.from_config(config.backbone_config )
# Semantic segmentation head(s)
_UpperCAmelCase = UperNetHead(snake_case , in_channels=self.backbone.channels )
_UpperCAmelCase = UperNetFCNHead(snake_case ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('batch_size, sequence_length' ) )
@replace_return_docstrings(output_type=snake_case , config_class=_CONFIG_FOR_DOC )
def lowerCamelCase_ ( self , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , ) -> Union[tuple, SemanticSegmenterOutput]:
_UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase = output_attentions if output_attentions is not None else self.config.output_attentions
_UpperCAmelCase = self.backbone.forward_with_filtered_kwargs(
snake_case , output_hidden_states=snake_case , output_attentions=snake_case )
_UpperCAmelCase = outputs.feature_maps
_UpperCAmelCase = self.decode_head(snake_case )
_UpperCAmelCase = nn.functional.interpolate(snake_case , size=pixel_values.shape[2:] , mode='bilinear' , align_corners=snake_case )
_UpperCAmelCase = None
if self.auxiliary_head is not None:
_UpperCAmelCase = self.auxiliary_head(snake_case )
_UpperCAmelCase = nn.functional.interpolate(
snake_case , size=pixel_values.shape[2:] , mode='bilinear' , align_corners=snake_case )
_UpperCAmelCase = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError('The number of labels should be greater than one' )
else:
# compute weighted loss
_UpperCAmelCase = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
_UpperCAmelCase = loss_fct(snake_case , snake_case )
_UpperCAmelCase = loss_fct(snake_case , snake_case )
_UpperCAmelCase = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
_UpperCAmelCase = (logits,) + outputs[1:]
else:
_UpperCAmelCase = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=snake_case , logits=snake_case , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 716 |
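The decode head's forward pass above fuses the feature pyramid top-down: each coarser lateral is bilinearly upsampled to the next finer lateral's spatial size and added in. A minimal sketch of that single step, with illustrative shapes (not taken from any checkpoint):

import torch
import torch.nn.functional as F

coarse = torch.randn(1, 8, 4, 4)  # lateral from a deeper backbone stage
fine = torch.randn(1, 8, 8, 8)    # lateral from the next shallower stage
fused = fine + F.interpolate(coarse, size=fine.shape[2:], mode='bilinear', align_corners=False)
assert fused.shape == (1, 8, 8, 8)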
"""simple docstring"""
def UpperCAmelCase ( A : int ):
'''simple docstring'''
    _UpperCAmelCase = [[0 for _ in range(A )] for _ in range(A + 1 )]
    for i in range(A + 1 ):
        _UpperCAmelCase = 1
    for n in range(A + 1 ):
        for k in range(1 , A ):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[A][A - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
lowercase = int(input('''Enter a number: ''').strip())
print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
lowercase = int(sys.argv[1])
print(partition(n))
except ValueError:
print('''Please pass a number.''')
| 24 | 0 |
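The dynamic program above tabulates integer partition counts (the Project Euler 76 flavor: writing m as a sum of at least two positive integers). As an independent cross-check, here is a sketch of the standard bounded-largest-part recurrence; the function name is illustrative and the quoted values are the well-known ones:

from functools import lru_cache

@lru_cache(maxsize=None)
def partitions_with_max_part(n: int, largest: int) -> int:
    # number of ways to write n as a sum of parts, each part <= largest
    if n == 0:
        return 1
    if largest == 0:
        return 0
    count = partitions_with_max_part(n, largest - 1)
    if n >= largest:
        count += partitions_with_max_part(n - largest, largest)
    return count

assert partitions_with_max_part(5, 5) == 7  # 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1
assert partitions_with_max_part(100, 99) == 190569291  # Project Euler 76: at least two parts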
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def UpperCAmelCase ( A : str ):
'''simple docstring'''
_UpperCAmelCase = SwinConfig()
_UpperCAmelCase = swin_name.split('_' )
_UpperCAmelCase = name_split[1]
_UpperCAmelCase = int(name_split[4] )
_UpperCAmelCase = int(name_split[3][-1] )
if model_size == "tiny":
_UpperCAmelCase = 96
_UpperCAmelCase = (2, 2, 6, 2)
_UpperCAmelCase = (3, 6, 12, 24)
elif model_size == "small":
_UpperCAmelCase = 96
_UpperCAmelCase = (2, 2, 18, 2)
_UpperCAmelCase = (3, 6, 12, 24)
elif model_size == "base":
_UpperCAmelCase = 128
_UpperCAmelCase = (2, 2, 18, 2)
_UpperCAmelCase = (4, 8, 16, 32)
else:
_UpperCAmelCase = 192
_UpperCAmelCase = (2, 2, 18, 2)
_UpperCAmelCase = (6, 12, 24, 48)
if "in22k" in swin_name:
_UpperCAmelCase = 2_1841
else:
_UpperCAmelCase = 1000
_UpperCAmelCase = 'huggingface/label-files'
_UpperCAmelCase = 'imagenet-1k-id2label.json'
_UpperCAmelCase = json.load(open(hf_hub_download(A , A , repo_type='dataset' ) , 'r' ) )
_UpperCAmelCase = {int(A ): v for k, v in idalabel.items()}
_UpperCAmelCase = idalabel
_UpperCAmelCase = {v: k for k, v in idalabel.items()}
_UpperCAmelCase = img_size
_UpperCAmelCase = num_classes
_UpperCAmelCase = embed_dim
_UpperCAmelCase = depths
_UpperCAmelCase = num_heads
_UpperCAmelCase = window_size
return config
def UpperCAmelCase ( A : List[str] ):
'''simple docstring'''
if "patch_embed.proj" in name:
_UpperCAmelCase = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
_UpperCAmelCase = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
_UpperCAmelCase = 'encoder.' + name
if "attn.proj" in name:
_UpperCAmelCase = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
_UpperCAmelCase = name.replace('attn' , 'attention.self' )
if "norm1" in name:
_UpperCAmelCase = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
_UpperCAmelCase = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
_UpperCAmelCase = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
_UpperCAmelCase = name.replace('mlp.fc2' , 'output.dense' )
if name == "norm.weight":
_UpperCAmelCase = 'layernorm.weight'
if name == "norm.bias":
_UpperCAmelCase = 'layernorm.bias'
if "head" in name:
_UpperCAmelCase = name.replace('head' , 'classifier' )
else:
_UpperCAmelCase = 'swin.' + name
return name
def UpperCAmelCase ( A : Dict , A : Tuple ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
_UpperCAmelCase = orig_state_dict.pop(A )
if "mask" in key:
continue
elif "qkv" in key:
_UpperCAmelCase = key.split('.' )
_UpperCAmelCase = int(key_split[1] )
_UpperCAmelCase = int(key_split[3] )
_UpperCAmelCase = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
_UpperCAmelCase = val[:dim, :]
_UpperCAmelCase = val[
dim : dim * 2, :
]
_UpperCAmelCase = val[-dim:, :]
else:
_UpperCAmelCase = val[
:dim
]
_UpperCAmelCase = val[
dim : dim * 2
]
_UpperCAmelCase = val[
-dim:
]
else:
_UpperCAmelCase = val
return orig_state_dict
def UpperCAmelCase ( A : Optional[Any] , A : List[str] ):
'''simple docstring'''
_UpperCAmelCase = timm.create_model(A , pretrained=A )
timm_model.eval()
_UpperCAmelCase = get_swin_config(A )
_UpperCAmelCase = SwinForImageClassification(A )
model.eval()
_UpperCAmelCase = convert_state_dict(timm_model.state_dict() , A )
model.load_state_dict(A )
_UpperCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_UpperCAmelCase = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-' ) ) )
_UpperCAmelCase = Image.open(requests.get(A , stream=A ).raw )
_UpperCAmelCase = image_processor(images=A , return_tensors='pt' )
_UpperCAmelCase = timm_model(inputs['pixel_values'] )
_UpperCAmelCase = model(**A ).logits
assert torch.allclose(A , A , atol=1e-3 )
print(f'Saving model {swin_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(A )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(A )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swin_name''',
default='''swin_tiny_patch4_window7_224''',
type=str,
help='''Name of the Swin timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
lowercase = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 717 |
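The checkpoint-name parsing above keys everything off underscore-separated fields, and the `name_split[3][-1]` slice only captures single-digit window sizes (e.g. `window7`). A self-contained sketch of parsing that also handles two-digit windows; the helper name is an assumption, not part of the original script:

def parse_swin_name(swin_name: str) -> dict:
    parts = swin_name.split('_')  # e.g. ['swin', 'tiny', 'patch4', 'window7', '224']
    return {
        'model_size': parts[1],
        'window_size': int(parts[3][len('window'):]),  # works for 'window12' too
        'image_size': int(parts[4]),
    }

assert parse_swin_name('swin_tiny_patch4_window7_224') == {
    'model_size': 'tiny',
    'window_size': 7,
    'image_size': 224,
}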
"""simple docstring"""
import os
lowercase = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 1_00, '''D''': 5_00, '''M''': 10_00}
def UpperCAmelCase ( A : str ):
'''simple docstring'''
_UpperCAmelCase = 0
_UpperCAmelCase = 0
while index < len(A ) - 1:
_UpperCAmelCase = SYMBOLS[numerals[index]]
_UpperCAmelCase = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def UpperCAmelCase ( A : int ):
'''simple docstring'''
_UpperCAmelCase = ''
_UpperCAmelCase = num // 1000
numerals += m_count * "M"
num %= 1000
_UpperCAmelCase = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
_UpperCAmelCase = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def UpperCAmelCase ( A : str = "/p089_roman.txt" ):
'''simple docstring'''
_UpperCAmelCase = 0
with open(os.path.dirname(A ) + roman_numerals_filename ) as filea:
_UpperCAmelCase = filea.readlines()
for line in lines:
_UpperCAmelCase = line.strip()
_UpperCAmelCase = parse_roman_numerals(A )
_UpperCAmelCase = generate_roman_numerals(A )
savings += len(A ) - len(A )
return savings
if __name__ == "__main__":
print(F'''{solution() = }''')
| 24 | 0 |
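Both halves of the task above — parsing possibly wasteful Roman numerals and re-emitting the minimal form — can be exercised on the pair from the Project Euler 89 statement. A compact parser sketch (not the original function; the sentinel 'I' trick is an implementation choice):

SYMBOLS = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}

def parse_roman(numerals: str) -> int:
    total = 0
    for cur, nxt in zip(numerals, numerals[1:] + 'I'):
        # subtractive pair when a smaller symbol precedes a larger one
        total += -SYMBOLS[cur] if SYMBOLS[cur] < SYMBOLS[nxt] else SYMBOLS[cur]
    return total

assert parse_roman('XIV') == 14
assert parse_roman('MCCCCCCVI') == 1606  # wasteful form
assert parse_roman('MDCVI') == 1606      # minimal form: four characters saved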
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
lowercase = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def UpperCAmelCase ( A : Dict ):
'''simple docstring'''
for pegasus_name, hf_name in PATTERNS:
_UpperCAmelCase = k.replace(A , A )
return k
def UpperCAmelCase ( A : dict , A : dict ):
'''simple docstring'''
_UpperCAmelCase = DEFAULTS.copy()
cfg_kwargs.update(A )
_UpperCAmelCase = PegasusConfig(**A )
_UpperCAmelCase = PegasusForConditionalGeneration(A )
_UpperCAmelCase = torch_model.model.state_dict()
_UpperCAmelCase = {}
for k, v in tf_weights.items():
_UpperCAmelCase = rename_state_dict_key(A )
if new_k not in sd:
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
if "dense" in k or "proj" in new_k:
_UpperCAmelCase = v.T
_UpperCAmelCase = torch.tensor(A , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, f'{new_k}, {k}, {v.shape}, {sd[new_k].shape}'
# make sure embedding.padding_idx is respected
_UpperCAmelCase = torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1] )
_UpperCAmelCase = mapping['shared.weight']
_UpperCAmelCase = mapping['shared.weight']
_UpperCAmelCase = {k: torch.zeros_like(A ) for k, v in sd.items() if k.endswith('bias' ) and k not in mapping}
mapping.update(**A )
_UpperCAmelCase , _UpperCAmelCase = torch_model.model.load_state_dict(A , strict=A )
_UpperCAmelCase = [
k for k in missing if k not in ['encoder.embed_positions.weight', 'decoder.embed_positions.weight']
]
assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], f'no matches found for the following tf keys {extra}'
return torch_model
def UpperCAmelCase ( A : Optional[Any]="./ckpt/aeslc/model.ckpt-32000" ):
'''simple docstring'''
_UpperCAmelCase = tf.train.list_variables(A )
_UpperCAmelCase = {}
_UpperCAmelCase = ['Adafactor', 'global_step']
for name, shape in tqdm(A , desc='converting tf checkpoint to dict' ):
_UpperCAmelCase = any(pat in name for pat in ignore_name )
if skip_key:
continue
_UpperCAmelCase = tf.train.load_variable(A , A )
_UpperCAmelCase = array
return tf_weights
def UpperCAmelCase ( A : str , A : str ):
'''simple docstring'''
_UpperCAmelCase = Path(A ).parent.name
_UpperCAmelCase = task_specific_params[f'summarization_{dataset}']['max_position_embeddings']
_UpperCAmelCase = PegasusTokenizer.from_pretrained('sshleifer/pegasus' , model_max_length=A )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(A )
# convert model
_UpperCAmelCase = get_tf_weights_as_numpy(A )
_UpperCAmelCase = task_specific_params[f'summarization_{dataset}']
if dataset == "large":
_UpperCAmelCase = task_specific_params
_UpperCAmelCase = convert_pegasus(A , A )
torch_model.save_pretrained(A )
_UpperCAmelCase = torch_model.state_dict()
sd.pop('model.decoder.embed_positions.weight' )
sd.pop('model.encoder.embed_positions.weight' )
torch.save(A , Path(A ) / 'pytorch_model.bin' )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
lowercase = parser.parse_args()
if args.save_dir is None:
lowercase = Path(args.tf_ckpt_path).parent.name
lowercase = os.path.join('''pegasus''', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 718 |
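The key renaming above is plain ordered string replacement, so rule order matters: '/' must become '.' before the LayerNorm patterns can match. A self-contained trace through three of the rules on a hypothetical TF-style key:

PATTERNS = [  # subset of the table above, kept in the same order
    ['/', '.'],
    ['.LayerNorm.gamma', '_layer_norm.weight'],
    ['decoder_layer_norm.', 'decoder.layer_norm.'],
]

def rename_key(k: str) -> str:
    for old, new in PATTERNS:
        k = k.replace(old, new)
    return k

assert rename_key('model/decoder/LayerNorm/gamma') == 'model.decoder.layer_norm.weight'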
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = {
'task_specific_params': {
'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4},
'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4},
'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6},
}
}
_UpperCAmelCase = {
'task_specific_params.summarization.length_penalty': 1.0,
'task_specific_params.summarization.max_length': 128,
'task_specific_params.summarization.min_length': 12,
'task_specific_params.summarization.num_beams': 4,
'task_specific_params.summarization_cnn.length_penalty': 2.0,
'task_specific_params.summarization_cnn.max_length': 142,
'task_specific_params.summarization_cnn.min_length': 56,
'task_specific_params.summarization_cnn.num_beams': 4,
'task_specific_params.summarization_xsum.length_penalty': 1.0,
'task_specific_params.summarization_xsum.max_length': 62,
'task_specific_params.summarization_xsum.min_length': 11,
'task_specific_params.summarization_xsum.num_beams': 6,
}
self.assertEqual(flatten_dict(snake_case ) , snake_case )
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(snake_case ) , x.transpose() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(transpose(snake_case ) , transpose(snake_case ).numpy() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , transpose(snake_case , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(transpose(snake_case ) , transpose(snake_case ).numpy() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , transpose(snake_case , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(transpose(snake_case ) , np.asarray(transpose(snake_case ) ) ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , np.asarray(transpose(snake_case , axes=(1, 2, 0) ) ) ) )
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , np.reshape(snake_case , (4, 3) ) ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , np.reshape(snake_case , (12, 5) ) ) )
@require_torch
def lowerCamelCase_ ( self ) -> Optional[Any]:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , reshape(snake_case , (4, 3) ).numpy() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , reshape(snake_case , (12, 5) ).numpy() ) )
@require_tf
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , reshape(snake_case , (4, 3) ).numpy() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , reshape(snake_case , (12, 5) ).numpy() ) )
@require_flax
def lowerCamelCase_ ( self ) -> Tuple:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , np.asarray(reshape(snake_case , (4, 3) ) ) ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , np.asarray(reshape(snake_case , (12, 5) ) ) ) )
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(snake_case ) , np.squeeze(snake_case ) ) )
_UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , np.squeeze(snake_case , axis=2 ) ) )
@require_torch
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = np.random.randn(1 , 3 , 4 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(squeeze(snake_case ) , squeeze(snake_case ).numpy() ) )
_UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , squeeze(snake_case , axis=2 ).numpy() ) )
@require_tf
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = np.random.randn(1 , 3 , 4 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(squeeze(snake_case ) , squeeze(snake_case ).numpy() ) )
_UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , squeeze(snake_case , axis=2 ).numpy() ) )
@require_flax
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = np.random.randn(1 , 3 , 4 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(squeeze(snake_case ) , np.asarray(squeeze(snake_case ) ) ) )
_UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , np.asarray(squeeze(snake_case , axis=2 ) ) ) )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , np.expand_dims(snake_case , axis=1 ) ) )
@require_torch
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , expand_dims(snake_case , axis=1 ).numpy() ) )
@require_tf
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , expand_dims(snake_case , axis=1 ).numpy() ) )
@require_flax
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , np.asarray(expand_dims(snake_case , axis=1 ) ) ) )
| 24 | 0 |
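The flatten_dict expectations in the first test pin the behavior down: nested keys are joined with a delimiter and leaves are kept as-is. A minimal reference implementation consistent with those expectations (the real transformers.utils.flatten_dict may differ in details):

def flatten_dict(d: dict, parent_key: str = '', delimiter: str = '.') -> dict:
    items = []
    for key, value in d.items():
        new_key = f'{parent_key}{delimiter}{key}' if parent_key else key
        if isinstance(value, dict):
            items.extend(flatten_dict(value, new_key, delimiter).items())
        else:
            items.append((new_key, value))
    return dict(items)

nested = {'summarization': {'max_length': 128, 'num_beams': 4}}
assert flatten_dict(nested) == {
    'summarization.max_length': 128,
    'summarization.num_beams': 4,
}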
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
lowercase = (7_20, 12_80) # Height, Width
lowercase = (0.4, 0.6) # if height or width lower than this scale, drop it.
lowercase = 1 / 1_00
lowercase = ''''''
lowercase = ''''''
lowercase = ''''''
lowercase = 2_50
def UpperCAmelCase ( ):
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase = get_dataset(A , A )
for index in range(A ):
_UpperCAmelCase = random.sample(range(len(A ) ) , 4 )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = update_image_and_anno(
A , A , A , A , A , filter_scale=A , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
_UpperCAmelCase = random_chars(32 )
_UpperCAmelCase = path.split(os.sep )[-1].rsplit('.' , 1 )[0]
_UpperCAmelCase = f'{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'
cva.imwrite(f'{file_root}.jpg' , A , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f'Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}' )
_UpperCAmelCase = []
for anno in new_annos:
_UpperCAmelCase = anno[3] - anno[1]
_UpperCAmelCase = anno[4] - anno[2]
_UpperCAmelCase = anno[1] + width / 2
_UpperCAmelCase = anno[2] + height / 2
_UpperCAmelCase = f'{anno[0]} {x_center} {y_center} {width} {height}'
annos_list.append(A )
with open(f'{file_root}.txt' , 'w' ) as outfile:
outfile.write('\n'.join(line for line in annos_list ) )
def UpperCAmelCase ( A : str , A : str ):
'''simple docstring'''
_UpperCAmelCase = []
_UpperCAmelCase = []
for label_file in glob.glob(os.path.join(A , '*.txt' ) ):
_UpperCAmelCase = label_file.split(os.sep )[-1].rsplit('.' , 1 )[0]
with open(A ) as in_file:
_UpperCAmelCase = in_file.readlines()
_UpperCAmelCase = os.path.join(A , f'{label_name}.jpg' )
_UpperCAmelCase = []
for obj_list in obj_lists:
_UpperCAmelCase = obj_list.rstrip('\n' ).split(' ' )
_UpperCAmelCase = float(obj[1] ) - float(obj[3] ) / 2
_UpperCAmelCase = float(obj[2] ) - float(obj[4] ) / 2
_UpperCAmelCase = float(obj[1] ) + float(obj[3] ) / 2
_UpperCAmelCase = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(A )
labels.append(A )
return img_paths, labels
def UpperCAmelCase ( A : list , A : list , A : list[int] , A : tuple[int, int] , A : tuple[float, float] , A : float = 0.0 , ):
'''simple docstring'''
_UpperCAmelCase = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
_UpperCAmelCase = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_UpperCAmelCase = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_UpperCAmelCase = int(scale_x * output_size[1] )
_UpperCAmelCase = int(scale_y * output_size[0] )
_UpperCAmelCase = []
_UpperCAmelCase = []
for i, index in enumerate(A ):
_UpperCAmelCase = all_img_list[index]
path_list.append(A )
_UpperCAmelCase = all_annos[index]
_UpperCAmelCase = cva.imread(A )
if i == 0: # top-left
_UpperCAmelCase = cva.resize(A , (divid_point_x, divid_point_y) )
_UpperCAmelCase = img
for bbox in img_annos:
_UpperCAmelCase = bbox[1] * scale_x
_UpperCAmelCase = bbox[2] * scale_y
_UpperCAmelCase = bbox[3] * scale_x
_UpperCAmelCase = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
_UpperCAmelCase = cva.resize(A , (output_size[1] - divid_point_x, divid_point_y) )
_UpperCAmelCase = img
for bbox in img_annos:
_UpperCAmelCase = scale_x + bbox[1] * (1 - scale_x)
_UpperCAmelCase = bbox[2] * scale_y
_UpperCAmelCase = scale_x + bbox[3] * (1 - scale_x)
_UpperCAmelCase = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
_UpperCAmelCase = cva.resize(A , (divid_point_x, output_size[0] - divid_point_y) )
_UpperCAmelCase = img
for bbox in img_annos:
_UpperCAmelCase = bbox[1] * scale_x
_UpperCAmelCase = scale_y + bbox[2] * (1 - scale_y)
_UpperCAmelCase = bbox[3] * scale_x
_UpperCAmelCase = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
_UpperCAmelCase = cva.resize(
A , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
_UpperCAmelCase = img
for bbox in img_annos:
_UpperCAmelCase = scale_x + bbox[1] * (1 - scale_x)
_UpperCAmelCase = scale_y + bbox[2] * (1 - scale_y)
_UpperCAmelCase = scale_x + bbox[3] * (1 - scale_x)
_UpperCAmelCase = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
    # Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
_UpperCAmelCase = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def UpperCAmelCase ( A : int ):
'''simple docstring'''
assert number_char > 1, "The number of character should greater than 1"
_UpperCAmelCase = ascii_lowercase + digits
return "".join(random.choice(A ) for _ in range(A ) )
if __name__ == "__main__":
main()
    print('''DONE ✅''')
| 719 |
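After stitching, the script converts corner-format boxes back to the YOLO label format: normalized center coordinates plus width and height. That conversion in isolation, with values chosen to be exact in binary floating point:

def corners_to_yolo(xmin: float, ymin: float, xmax: float, ymax: float) -> tuple:
    width = xmax - xmin
    height = ymax - ymin
    return (xmin + width / 2, ymin + height / 2, width, height)

assert corners_to_yolo(0.25, 0.25, 0.75, 0.5) == (0.5, 0.375, 0.5, 0.25)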
"""simple docstring"""
import os
def UpperCAmelCase ( ):
'''simple docstring'''
_UpperCAmelCase = os.path.join(os.path.dirname(A ) , 'num.txt' )
with open(A ) as file_hand:
return str(sum(int(A ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
| 24 | 0 |
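The same first-ten-digits-of-a-large-sum idea on two toy 50-digit strings; Python integers are arbitrary precision, so no truncation tricks are needed:

numbers = [
    '37107287533902102798797998220837590246510135740250',
    '46376937677490009712648124896970078050417018260538',
]
assert str(sum(int(n) for n in numbers))[:10] == '8348422521'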
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowercase = logging.get_logger(__name__)
lowercase = {
'''post_extract_proj''': '''feature_projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.upsample.0''': '''encoder.upsample.projection''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
def UpperCAmelCase ( A : Optional[int] , A : Optional[Any] , A : str , A : Optional[int] , A : Dict ):
'''simple docstring'''
for attribute in key.split('.' ):
_UpperCAmelCase = getattr(A , A )
if weight_type is not None:
_UpperCAmelCase = getattr(A , A ).shape
else:
_UpperCAmelCase = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
_UpperCAmelCase = value
elif weight_type == "weight_g":
_UpperCAmelCase = value
elif weight_type == "weight_v":
_UpperCAmelCase = value
elif weight_type == "bias":
_UpperCAmelCase = value
else:
_UpperCAmelCase = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def UpperCAmelCase ( A : Optional[Any] , A : str , A : List[Any] ):
'''simple docstring'''
_UpperCAmelCase = []
_UpperCAmelCase = fairseq_model.state_dict()
_UpperCAmelCase = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
_UpperCAmelCase = False
if "conv_layers" in name:
load_conv_layer(
A , A , A , A , hf_model.config.feat_extract_norm == 'group' , )
_UpperCAmelCase = True
else:
for key, mapped_key in MAPPING.items():
_UpperCAmelCase = 'sew.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
_UpperCAmelCase = True
if "*" in mapped_key:
_UpperCAmelCase = name.split(A )[0].split('.' )[-2]
_UpperCAmelCase = mapped_key.replace('*' , A )
if "weight_g" in name:
_UpperCAmelCase = 'weight_g'
elif "weight_v" in name:
_UpperCAmelCase = 'weight_v'
elif "weight" in name:
_UpperCAmelCase = 'weight'
elif "bias" in name:
_UpperCAmelCase = 'bias'
else:
_UpperCAmelCase = None
set_recursively(A , A , A , A , A )
continue
if not is_used:
unused_weights.append(A )
logger.warning(f'Unused weights: {unused_weights}' )
def UpperCAmelCase ( A : List[Any] , A : List[str] , A : List[str] , A : List[str] , A : List[str] ):
'''simple docstring'''
_UpperCAmelCase = full_name.split('conv_layers.' )[-1]
_UpperCAmelCase = name.split('.' )
_UpperCAmelCase = int(items[0] )
_UpperCAmelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
_UpperCAmelCase = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
_UpperCAmelCase = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
_UpperCAmelCase = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
_UpperCAmelCase = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(A )
def UpperCAmelCase ( A : Dict , A : Any ):
'''simple docstring'''
_UpperCAmelCase = SEWConfig()
if is_finetuned:
_UpperCAmelCase = model.wav_encoder.wav_model.cfg
else:
_UpperCAmelCase = model.cfg
_UpperCAmelCase = fs_config.conv_bias
_UpperCAmelCase = eval(fs_config.conv_feature_layers )
_UpperCAmelCase = [x[0] for x in conv_layers]
_UpperCAmelCase = [x[1] for x in conv_layers]
_UpperCAmelCase = [x[2] for x in conv_layers]
_UpperCAmelCase = 'gelu'
_UpperCAmelCase = 'layer' if fs_config.extractor_mode == 'layer_norm' else 'group'
_UpperCAmelCase = 0.0
_UpperCAmelCase = fs_config.activation_fn.name
_UpperCAmelCase = fs_config.encoder_embed_dim
_UpperCAmelCase = 0.02
_UpperCAmelCase = fs_config.encoder_ffn_embed_dim
_UpperCAmelCase = 1e-5
_UpperCAmelCase = fs_config.encoder_layerdrop
_UpperCAmelCase = fs_config.encoder_attention_heads
_UpperCAmelCase = fs_config.conv_pos_groups
_UpperCAmelCase = fs_config.conv_pos
_UpperCAmelCase = len(A )
_UpperCAmelCase = fs_config.encoder_layers
_UpperCAmelCase = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
_UpperCAmelCase = model.cfg
_UpperCAmelCase = fs_config.final_dropout
_UpperCAmelCase = fs_config.layerdrop
_UpperCAmelCase = fs_config.activation_dropout
_UpperCAmelCase = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
_UpperCAmelCase = fs_config.attention_dropout
_UpperCAmelCase = fs_config.dropout_input
_UpperCAmelCase = fs_config.dropout
_UpperCAmelCase = fs_config.mask_channel_length
_UpperCAmelCase = fs_config.mask_channel_prob
_UpperCAmelCase = fs_config.mask_length
_UpperCAmelCase = fs_config.mask_prob
_UpperCAmelCase = 'Wav2Vec2FeatureExtractor'
_UpperCAmelCase = 'Wav2Vec2CTCTokenizer'
return config
@torch.no_grad()
def UpperCAmelCase ( A : List[Any] , A : Any , A : Optional[Any]=None , A : Tuple=None , A : Union[str, Any]=True ):
'''simple docstring'''
if is_finetuned:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
_UpperCAmelCase = SEWConfig.from_pretrained(A )
else:
_UpperCAmelCase = convert_config(model[0] , A )
_UpperCAmelCase = model[0].eval()
_UpperCAmelCase = True if config.feat_extract_norm == 'layer' else False
_UpperCAmelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=A , return_attention_mask=A , )
if is_finetuned:
if dict_path:
_UpperCAmelCase = Dictionary.load(A )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_UpperCAmelCase = target_dict.pad_index
_UpperCAmelCase = target_dict.bos_index
_UpperCAmelCase = target_dict.pad_index
_UpperCAmelCase = target_dict.bos_index
_UpperCAmelCase = target_dict.eos_index
_UpperCAmelCase = len(target_dict.symbols )
_UpperCAmelCase = os.path.join(A , 'vocab.json' )
if not os.path.isdir(A ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(A ) )
return
os.makedirs(A , exist_ok=A )
with open(A , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(target_dict.indices , A )
_UpperCAmelCase = WavaVecaCTCTokenizer(
A , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=A , )
_UpperCAmelCase = WavaVecaProcessor(feature_extractor=A , tokenizer=A )
processor.save_pretrained(A )
_UpperCAmelCase = SEWForCTC(A )
else:
_UpperCAmelCase = SEWModel(A )
feature_extractor.save_pretrained(A )
recursively_load_weights(A , A , A )
hf_model.save_pretrained(A )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--is_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
lowercase = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 720 |
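The weight-mapping loop above substitutes the encoder layer index for the '*' placeholder by splitting the fairseq parameter name. A sketch of just that substitution; the parameter name is hypothetical, and the weight-type suffix handling is omitted:

from typing import Optional

MAPPING = {'self_attn.k_proj': 'encoder.layers.*.attention.k_proj'}

def map_key(name: str) -> Optional[str]:
    for key, mapped in MAPPING.items():
        if key in name:
            layer_index = name.split(key)[0].split('.')[-2]
            return mapped.replace('*', layer_index)
    return None

assert map_key('encoder.layers.3.self_attn.k_proj.weight') == 'encoder.layers.3.attention.k_proj'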
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase = {
'''configuration_roberta''': ['''ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RobertaConfig''', '''RobertaOnnxConfig'''],
'''tokenization_roberta''': ['''RobertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''RobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaForCausalLM''',
'''RobertaForMaskedLM''',
'''RobertaForMultipleChoice''',
'''RobertaForQuestionAnswering''',
'''RobertaForSequenceClassification''',
'''RobertaForTokenClassification''',
'''RobertaModel''',
'''RobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaForCausalLM''',
'''TFRobertaForMaskedLM''',
'''TFRobertaForMultipleChoice''',
'''TFRobertaForQuestionAnswering''',
'''TFRobertaForSequenceClassification''',
'''TFRobertaForTokenClassification''',
'''TFRobertaMainLayer''',
'''TFRobertaModel''',
'''TFRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''FlaxRobertaForCausalLM''',
'''FlaxRobertaForMaskedLM''',
'''FlaxRobertaForMultipleChoice''',
'''FlaxRobertaForQuestionAnswering''',
'''FlaxRobertaForSequenceClassification''',
'''FlaxRobertaForTokenClassification''',
'''FlaxRobertaModel''',
'''FlaxRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 24 | 0 |
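The TYPE_CHECKING/_LazyModule split above gives static type checkers eager imports while deferring real imports until an attribute is first accessed. A minimal sketch of the mechanism — this is not the actual _LazyModule implementation:

import importlib
from types import ModuleType

class MiniLazyModule(ModuleType):
    def __init__(self, name: str, import_structure: dict) -> None:
        super().__init__(name)
        # invert {submodule: [names]} into {name: submodule}
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr in self._name_to_module:
            module = importlib.import_module('.' + self._name_to_module[attr], self.__name__)
            return getattr(module, attr)
        raise AttributeError(f'module {self.__name__!r} has no attribute {attr!r}')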
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase_ ( self ) -> Optional[Any]:
_UpperCAmelCase = 'ylacombe/bark-small'
_UpperCAmelCase = tempfile.mkdtemp()
_UpperCAmelCase = 'en_speaker_1'
_UpperCAmelCase = 'This is a test string'
_UpperCAmelCase = 'speaker_embeddings_path.json'
_UpperCAmelCase = 'speaker_embeddings'
def lowerCamelCase_ ( self , **snake_case ) -> Optional[int]:
return AutoTokenizer.from_pretrained(self.checkpoint , **snake_case )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def lowerCamelCase_ ( self ) -> Tuple:
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = BarkProcessor(tokenizer=snake_case )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
_UpperCAmelCase = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
_UpperCAmelCase = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
_UpperCAmelCase = 35
_UpperCAmelCase = 2
_UpperCAmelCase = 8
_UpperCAmelCase = {
'semantic_prompt': np.ones(snake_case ),
'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len) ),
'fine_prompt': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
_UpperCAmelCase = processor(text=self.input_string , voice_preset=snake_case )
_UpperCAmelCase = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(snake_case , np.array([] ) ).tolist() )
# test loading voice preset from npz file
_UpperCAmelCase = os.path.join(self.tmpdirname , 'file.npz' )
np.savez(snake_case , **snake_case )
_UpperCAmelCase = processor(text=self.input_string , voice_preset=snake_case )
_UpperCAmelCase = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(snake_case , np.array([] ) ).tolist() )
# test loading voice preset from the hub
_UpperCAmelCase = processor(text=self.input_string , voice_preset=self.voice_preset )
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = BarkProcessor(tokenizer=snake_case )
_UpperCAmelCase = processor(text=self.input_string )
_UpperCAmelCase = tokenizer(
self.input_string , padding='max_length' , max_length=256 , add_special_tokens=snake_case , return_attention_mask=snake_case , return_token_type_ids=snake_case , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 721 |
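Per the test above, a Bark voice preset is a dict of three prompt arrays that round-trips through an .npz archive. In isolation:

import numpy as np

voice_preset = {
    'semantic_prompt': np.ones(35),
    'coarse_prompt': np.ones((2, 35)),
    'fine_prompt': np.ones((8, 35)),
}
np.savez('file.npz', **voice_preset)
loaded = np.load('file.npz')
assert set(loaded.files) == set(voice_preset)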
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
lowercase = logging.get_logger(__name__)
class lowercase__ ( A ):
'''simple docstring'''
def __init__( self , *snake_case , **snake_case ) -> None:
warnings.warn(
'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use YolosImageProcessor instead.' , snake_case , )
super().__init__(*snake_case , **snake_case )
| 24 | 0 |
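The deprecation-shim pattern above — subclass the replacement, warn, delegate — in a self-contained form; the class names here are placeholders, and FutureWarning is assumed to be the category the real shim passes:

import warnings

class NewImageProcessor:
    def __init__(self, size: int = 224) -> None:
        self.size = size

class OldFeatureExtractor(NewImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'OldFeatureExtractor is deprecated, use NewImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    OldFeatureExtractor(size=512)
assert caught and issubclass(caught[0].category, FutureWarning)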
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowercase = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class lowercase__ ( A, unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = XGLMTokenizer
_UpperCAmelCase = XGLMTokenizerFast
_UpperCAmelCase = True
_UpperCAmelCase = True
def lowerCamelCase_ ( self ) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCAmelCase = XGLMTokenizer(snake_case , keep_accents=snake_case )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = '<pad>'
_UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case ) , snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case ) , snake_case )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(len(snake_case ) , 1008 )
def lowerCamelCase_ ( self ) -> Tuple:
self.assertEqual(self.get_tokenizer().vocab_size , 1008 )
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = XGLMTokenizer(snake_case , keep_accents=snake_case )
_UpperCAmelCase = tokenizer.tokenize('This is a test' )
        self.assertListEqual(snake_case , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_UpperCAmelCase = tokenizer.tokenize('I was born in 92000, and this is falsรฉ.' )
self.assertListEqual(
snake_case , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'รฉ',
'.',
] , )
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(snake_case )
self.assertListEqual(
snake_case , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_UpperCAmelCase = tokenizer.convert_ids_to_tokens(snake_case )
self.assertListEqual(
snake_case , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def lowerCamelCase_ ( self ) -> int:
return XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
def lowerCamelCase_ ( self ) -> Optional[int]:
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(snake_case , f.name )
_UpperCAmelCase = XGLMTokenizer(f.name , keep_accents=snake_case )
_UpperCAmelCase = pickle.dumps(snake_case )
pickle.loads(snake_case )
def lowerCamelCase_ ( self ) -> int:
if not self.test_rust_tokenizer:
return
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = 'I was born in 92000, and this is falsรฉ.'
_UpperCAmelCase = tokenizer.tokenize(snake_case )
_UpperCAmelCase = rust_tokenizer.tokenize(snake_case )
self.assertListEqual(snake_case , snake_case )
_UpperCAmelCase = tokenizer.encode(snake_case , add_special_tokens=snake_case )
_UpperCAmelCase = rust_tokenizer.encode(snake_case , add_special_tokens=snake_case )
self.assertListEqual(snake_case , snake_case )
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = tokenizer.encode(snake_case )
_UpperCAmelCase = rust_tokenizer.encode(snake_case )
self.assertListEqual(snake_case , snake_case )
@slow
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = 'Hello World!'
_UpperCAmelCase = [2, 31227, 4447, 35]
self.assertListEqual(snake_case , self.big_tokenizer.encode(snake_case ) )
@slow
def lowerCamelCase_ ( self ) -> Optional[Any]:
_UpperCAmelCase = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'
)
# fmt: off
_UpperCAmelCase = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(snake_case , self.big_tokenizer.encode(snake_case ) )
@slow
def lowerCamelCase_ ( self ) -> str:
# fmt: off
_UpperCAmelCase = {
'input_ids': [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case , model_name='facebook/xglm-564M' , padding=snake_case , )
| 700 |
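SentencePiece marks word boundaries with U+2581 ('▁', the lower one eighth block), which is why the expected tokens above carry that prefix instead of spaces. Undoing it is a simple replacement:

SPIECE_UNDERLINE = '\u2581'  # the '▁' word-boundary marker

def detokenize(pieces: list) -> str:
    return ''.join(pieces).replace(SPIECE_UNDERLINE, ' ').lstrip()

pieces = ['\u2581This', '\u2581is', '\u2581a', '\u2581t', 'est']
assert detokenize(pieces) == 'This is a test'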
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
'''microsoft/beit-base-patch16-224-pt22k''': (
'''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'''
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class lowercase__ ( A ):
'''simple docstring'''
_UpperCAmelCase = '''beit'''
def __init__( self , snake_case=8192 , snake_case=768 , snake_case=12 , snake_case=12 , snake_case=3072 , snake_case="gelu" , snake_case=0.0 , snake_case=0.0 , snake_case=0.02 , snake_case=1E-12 , snake_case=224 , snake_case=16 , snake_case=3 , snake_case=False , snake_case=False , snake_case=False , snake_case=False , snake_case=0.1 , snake_case=0.1 , snake_case=True , snake_case=[3, 5, 7, 11] , snake_case=[1, 2, 3, 6] , snake_case=True , snake_case=0.4 , snake_case=256 , snake_case=1 , snake_case=False , snake_case=255 , **snake_case , ) -> str:
super().__init__(**snake_case )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = use_mask_token
_UpperCAmelCase = use_absolute_position_embeddings
_UpperCAmelCase = use_relative_position_bias
_UpperCAmelCase = use_shared_relative_position_bias
_UpperCAmelCase = layer_scale_init_value
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = use_mean_pooling
# decode head attributes (semantic segmentation)
_UpperCAmelCase = out_indices
_UpperCAmelCase = pool_scales
# auxiliary head attributes (semantic segmentation)
_UpperCAmelCase = use_auxiliary_head
_UpperCAmelCase = auxiliary_loss_weight
_UpperCAmelCase = auxiliary_channels
_UpperCAmelCase = auxiliary_num_convs
_UpperCAmelCase = auxiliary_concat_input
_UpperCAmelCase = semantic_loss_ignore_index
class lowercase__ ( A ):
'''simple docstring'''
_UpperCAmelCase = version.parse('''1.11''' )
@property
def lowerCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def lowerCamelCase_ ( self ) -> float:
return 1E-4
| 24 | 0 |
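Instantiating the config with the defaults shown in the signature; this assumes the published transformers package, so treat it as a sketch rather than a pinned API:

from transformers import BeitConfig

config = BeitConfig()  # defaults above: hidden_size=768, 12 layers, 12 heads, 224px images, 16px patches
assert config.hidden_size == 768
assert config.image_size == 224 and config.patch_size == 16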
"""simple docstring"""
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCAmelCase ( A : Tuple , A : List[Any] , A : Union[str, Any] ):
'''simple docstring'''
_UpperCAmelCase = BertConfig.from_json_file(A )
print(f'Building PyTorch model from configuration: {config}' )
_UpperCAmelCase = BertForPreTraining(A )
# Load weights from tf checkpoint
load_tf_weights_in_bert(A , A , A )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , A )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowercase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 701 |
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
lowercase = logging.getLogger(__name__)
if __name__ == "__main__":
lowercase = argparse.ArgumentParser(
description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
)
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
parser.add_argument('''--vocab_size''', default=3_05_22, type=int)
lowercase = parser.parse_args()
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file, '''rb''') as fp:
lowercase = pickle.load(fp)
logger.info('''Counting occurrences for MLM.''')
lowercase = Counter()
for tk_ids in data:
counter.update(tk_ids)
lowercase = [0] * args.vocab_size
for k, v in counter.items():
lowercase = v
logger.info(F'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, '''wb''') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 24 | 0 |
"""simple docstring"""
import random
def UpperCAmelCase ( A : Optional[Any] , A : Any , A : str ):
'''simple docstring'''
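# Lomuto-style partition: a[left_index] is the pivot; elements smaller than it are swapped to its left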
_UpperCAmelCase = a[left_index]
_UpperCAmelCase = left_index + 1
for j in range(left_index + 1 , A ):
if a[j] < pivot:
_UpperCAmelCase , _UpperCAmelCase = a[i], a[j]
i += 1
_UpperCAmelCase , _UpperCAmelCase = a[i - 1], a[left_index]
return i - 1
def UpperCAmelCase ( A : Dict , A : str , A : str ):
'''simple docstring'''
if left < right:
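# pick a random pivot index so already-sorted input does not trigger quadratic behaviour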
_UpperCAmelCase = random.randint(A , right - 1 )
_UpperCAmelCase , _UpperCAmelCase = (
a[left],
a[pivot],
) # switches the pivot with the left most bound
_UpperCAmelCase = partition(A , A , A )
quick_sort_random(
A , A , A ) # recursive quicksort to the left of the pivot point
quick_sort_random(
A , pivot_index + 1 , A ) # recursive quicksort to the right of the pivot point
def UpperCAmelCase ( ):
'''simple docstring'''
_UpperCAmelCase = input('Enter numbers separated by a comma:\n' ).strip()
_UpperCAmelCase = [int(item ) for item in user_input.split(',' )]
quick_sort_random(A , 0 , len(A ) )
print(A )
if __name__ == "__main__":
main()
| 702 |
"""simple docstring"""
from itertools import permutations
def UpperCAmelCase ( A : tuple ):
'''simple docstring'''
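# cheap eliminations before testing 7, 11, 13 and 17: num[3] must be even, num[2]+num[3]+num[4] divisible by 3, num[5] divisible by 5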
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
_UpperCAmelCase = [7, 11, 13, 17]
for i, test in enumerate(A ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def UpperCAmelCase ( A : int = 10 ):
'''simple docstring'''
return sum(
int(''.join(map(A , A ) ) )
for num in permutations(range(A ) )
if is_substring_divisible(A ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 24 | 0 |
"""simple docstring"""
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def UpperCAmelCase ( A : Union[dict, list, tuple, torch.Tensor] ):
'''simple docstring'''
_UpperCAmelCase = []
if isinstance(A , A ):
for v in tree.values():
shapes.extend(_fetch_dims(A ) )
elif isinstance(A , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(A ) )
elif isinstance(A , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError('Not supported' )
return shapes
@torch.jit.ignore
def UpperCAmelCase ( A : int , A : Tuple[int, ...] ):
'''simple docstring'''
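# peel off dimensions in row-major order, then reverse to obtain the multi-dimensional index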
_UpperCAmelCase = []
for d in reversed(A ):
idx.append(flat_idx % d )
_UpperCAmelCase = flat_idx // d
return tuple(reversed(A ) )
@torch.jit.ignore
def UpperCAmelCase ( A : Sequence[int] , A : Sequence[int] , A : Sequence[int] , A : Optional[Sequence[bool]] = None , A : Optional[Sequence[bool]] = None , ):
'''simple docstring'''
def reduce_edge_list(A : List[bool] ) -> None:
_UpperCAmelCase = True
for i in range(len(A ) ):
_UpperCAmelCase = -1 * (i + 1)
l[reversed_idx] &= tally
_UpperCAmelCase = l[reversed_idx]
if start_edges is None:
_UpperCAmelCase = [s == 0 for s in start]
reduce_edge_list(A )
if end_edges is None:
_UpperCAmelCase = [e == (d - 1) for e, d in zip(A , A )]
reduce_edge_list(A )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(A ) == 0:
return [()]
elif len(A ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
_UpperCAmelCase = []
_UpperCAmelCase = []
# Dimensions common to start and end can be selected directly
for s, e in zip(A , A ):
if s == e:
path_list.append(slice(A , s + 1 ) )
else:
break
_UpperCAmelCase = tuple(A )
_UpperCAmelCase = len(A )
# start == end, and we're done
if divergence_idx == len(A ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
_UpperCAmelCase = start[divergence_idx]
return tuple(
path + (slice(A , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
_UpperCAmelCase = end[divergence_idx]
return tuple(
path + (slice(A , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
_UpperCAmelCase = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def UpperCAmelCase ( A : torch.Tensor , A : int , A : int , A : int ):
'''simple docstring'''
_UpperCAmelCase = t.shape[:no_batch_dims]
_UpperCAmelCase = list(_flat_idx_to_idx(A , A ) )
# _get_minimal_slice_set is inclusive
_UpperCAmelCase = list(_flat_idx_to_idx(flat_end - 1 , A ) )
# Get an ordered list of slices to perform
_UpperCAmelCase = _get_minimal_slice_set(
A , A , A , )
_UpperCAmelCase = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def UpperCAmelCase ( A : Callable , A : Dict[str, Any] , A : int , A : int , A : bool = False , A : Any = None , A : bool = False , ):
'''simple docstring'''
if not (len(A ) > 0):
raise ValueError('Must provide at least one input' )
_UpperCAmelCase = [shape[:no_batch_dims] for shape in _fetch_dims(A )]
_UpperCAmelCase = tuple([max(A ) for s in zip(*A )] )
def _prep_inputs(t : torch.Tensor ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
_UpperCAmelCase = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
_UpperCAmelCase = t.reshape(-1 , *t.shape[no_batch_dims:] )
else:
_UpperCAmelCase = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
_UpperCAmelCase = tensor_tree_map(_prep_inputs , A )
_UpperCAmelCase = None
if _out is not None:
_UpperCAmelCase = tensor_tree_map(lambda t : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
_UpperCAmelCase = 1
for d in orig_batch_dims:
flat_batch_dim *= d
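# ceiling division: number of chunks needed to cover the flattened batch dimension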
_UpperCAmelCase = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(t : torch.Tensor ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
_UpperCAmelCase = 0
_UpperCAmelCase = prepped_outputs
for _ in range(A ):
# Chunk the input
if not low_mem:
_UpperCAmelCase = _select_chunk
else:
_UpperCAmelCase = partial(
_chunk_slice , flat_start=A , flat_end=min(A , i + chunk_size ) , no_batch_dims=len(A ) , )
_UpperCAmelCase = tensor_tree_map(A , A )
# Run the layer on the chunk
_UpperCAmelCase = layer(**A )
# Allocate space for the output
if out is None:
_UpperCAmelCase = tensor_tree_map(lambda t : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , A )
# Put the chunk in its pre-allocated space
if isinstance(A , A ):
def assign(A : dict , A : dict ) -> None:
for k, v in da.items():
if isinstance(A , A ):
assign(A , da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
_UpperCAmelCase = da[k]
assign(A , A )
elif isinstance(A , A ):
for xa, xa in zip(A , A ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
_UpperCAmelCase = xa
elif isinstance(A , torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
_UpperCAmelCase = output_chunk
else:
raise ValueError('Not supported' )
i += chunk_size
_UpperCAmelCase = tensor_tree_map(lambda t : t.view(orig_batch_dims + t.shape[1:] ) , A )
return out
class lowercase__ :
'''simple docstring'''
def __init__( self , snake_case = 512 , ) -> str:
_UpperCAmelCase = max_chunk_size
_UpperCAmelCase = None
_UpperCAmelCase = None
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> int:
logging.info('Tuning chunk size...' )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
_UpperCAmelCase = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
_UpperCAmelCase = [c for c in candidates if c > min_chunk_size]
_UpperCAmelCase = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(snake_case ) -> bool:
try:
with torch.no_grad():
fn(*snake_case , chunk_size=snake_case )
return True
except RuntimeError:
return False
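# search the candidate list for the largest chunk size that runs without a RuntimeError (e.g. CUDA OOM)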
_UpperCAmelCase = 0
_UpperCAmelCase = len(snake_case ) - 1
while i > min_viable_chunk_size_index:
_UpperCAmelCase = test_chunk_size(candidates[i] )
if not viable:
_UpperCAmelCase = (min_viable_chunk_size_index + i) // 2
else:
_UpperCAmelCase = i
_UpperCAmelCase = (i + len(snake_case ) - 1) // 2
return candidates[min_viable_chunk_size_index]
def lowerCamelCase_ ( self , snake_case , snake_case ) -> bool:
_UpperCAmelCase = True
for aa, aa in zip(snake_case , snake_case ):
assert type(snake_case ) == type(snake_case )
if isinstance(snake_case , (list, tuple) ):
consistent &= self._compare_arg_caches(snake_case , snake_case )
elif isinstance(snake_case , snake_case ):
_UpperCAmelCase = [v for _, v in sorted(aa.items() , key=lambda x : x[0] )]
_UpperCAmelCase = [v for _, v in sorted(aa.items() , key=lambda x : x[0] )]
consistent &= self._compare_arg_caches(snake_case , snake_case )
else:
consistent &= aa == aa
return consistent
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , ) -> int:
_UpperCAmelCase = True
_UpperCAmelCase = tree_map(lambda a : a.shape if isinstance(a , torch.Tensor ) else a , snake_case , snake_case )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(snake_case )
_UpperCAmelCase = self._compare_arg_caches(self.cached_arg_data , snake_case )
else:
# Otherwise, we can reuse the precomputed value
_UpperCAmelCase = False
if not consistent:
_UpperCAmelCase = self._determine_favorable_chunk_size(
snake_case , snake_case , snake_case , )
_UpperCAmelCase = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
| 703 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase = {
'''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
'''tokenization_mvp''': ['''MvpTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''MvpTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 24 | 0 |
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase ( A : list[float] , A : str ) -> Optional[int]:
'''simple docstring'''
print(f'Vertex\tShortest Distance from vertex {src}' )
for i, d in enumerate(A ):
print(f'{i}\t\t{d}' )
def UpperCAmelCase ( A : list[dict[str, int]] , A : list[float] , A : int ) -> List[Any]:
'''simple docstring'''
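# run one extra relaxation pass: if any distance can still improve, a negative-weight cycle exists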
for j in range(A ):
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = (graph[j][k] for k in ['src', 'dst', 'weight'])
if distance[u] != float('inf' ) and distance[u] + w < distance[v]:
return True
return False
def UpperCAmelCase ( A : list[dict[str, int]] , A : int , A : int , A : int ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = [float('inf' )] * vertex_count
_UpperCAmelCase = 0.0
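# relax every edge |V| - 1 times; a shortest path uses at most |V| - 1 edges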
for _ in range(vertex_count - 1 ):
for j in range(A ):
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = (graph[j][k] for k in ['src', 'dst', 'weight'])
if distance[u] != float('inf' ) and distance[u] + w < distance[v]:
_UpperCAmelCase = distance[u] + w
_UpperCAmelCase = check_negative_cycle(A , A , A )
if negative_cycle_exists:
raise Exception('Negative cycle found' )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase = int(input('''Enter number of vertices: ''').strip())
lowercase = int(input('''Enter number of edges: ''').strip())
lowercase = [{} for _ in range(E)]
for i in range(E):
print('''Edge ''', i + 1)
lowercase , lowercase , lowercase = (
int(x)
for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
)
lowercase = {'''src''': src, '''dst''': dest, '''weight''': weight}
lowercase = int(input('''\nEnter shortest path source:''').strip())
lowercase = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 704 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 24 | 0 |
"""simple docstring"""
class lowercase__ :
'''simple docstring'''
def __init__( self ) -> Dict:
_UpperCAmelCase = {}
def lowerCamelCase_ ( self ) -> None:
print(self.vertex )
for i in self.vertex:
print(i , ' -> ' , ' -> '.join([str(j ) for j in self.vertex[i]] ) )
def lowerCamelCase_ ( self , snake_case , snake_case ) -> None:
# check if the vertex is already present
if from_vertex in self.vertex:
self.vertex[from_vertex].append(snake_case )
else:
# else make a new vertex
_UpperCAmelCase = [to_vertex]
def lowerCamelCase_ ( self ) -> None:
# visited array for storing already visited nodes
_UpperCAmelCase = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(i , visited )
def lowerCamelCase_ ( self , snake_case , snake_case ) -> None:
# mark start vertex as visited
_UpperCAmelCase = True
print(snake_case , end=' ' )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(snake_case , snake_case )
if __name__ == "__main__":
lowercase = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('''DFS:''')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 705 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowercase = logging.get_logger(__name__)
lowercase = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class lowercase__ ( A, A ):
'''simple docstring'''
_UpperCAmelCase = '''swin'''
_UpperCAmelCase = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , snake_case=224 , snake_case=4 , snake_case=3 , snake_case=96 , snake_case=[2, 2, 6, 2] , snake_case=[3, 6, 12, 24] , snake_case=7 , snake_case=4.0 , snake_case=True , snake_case=0.0 , snake_case=0.0 , snake_case=0.1 , snake_case="gelu" , snake_case=False , snake_case=0.02 , snake_case=1E-5 , snake_case=32 , snake_case=None , snake_case=None , **snake_case , ) -> List[Any]:
super().__init__(**snake_case )
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = embed_dim
_UpperCAmelCase = depths
_UpperCAmelCase = len(snake_case )
_UpperCAmelCase = num_heads
_UpperCAmelCase = window_size
_UpperCAmelCase = mlp_ratio
_UpperCAmelCase = qkv_bias
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = hidden_act
_UpperCAmelCase = use_absolute_embeddings
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = initializer_range
_UpperCAmelCase = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_UpperCAmelCase = int(embed_dim * 2 ** (len(snake_case ) - 1) )
_UpperCAmelCase = ['stem'] + [f'stage{idx}' for idx in range(1 , len(snake_case ) + 1 )]
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(
out_features=snake_case , out_indices=snake_case , stage_names=self.stage_names )
class lowercase__ ( A ):
'''simple docstring'''
_UpperCAmelCase = version.parse('''1.11''' )
@property
def lowerCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def lowerCamelCase_ ( self ) -> float:
return 1E-4
| 24 | 0 |
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
lowercase = ''''''
lowercase = ''''''
lowercase = ''''''
lowercase = 1 # (0 is vertical, 1 is horizontal)
def UpperCAmelCase ( ):
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase = get_dataset(A , A )
print('Processing...' )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = update_image_and_anno(A , A , A )
for index, image in enumerate(A ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
_UpperCAmelCase = random_chars(32 )
_UpperCAmelCase = paths[index].split(os.sep )[-1].rsplit('.' , 1 )[0]
_UpperCAmelCase = f'{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'
cva.imwrite(f'/{file_root}.jpg' , A , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f'Success {index+1}/{len(A )} with {file_name}' )
_UpperCAmelCase = []
for anno in new_annos[index]:
_UpperCAmelCase = f'{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'
annos_list.append(A )
with open(f'/{file_root}.txt' , 'w' ) as outfile:
outfile.write('\n'.join(line for line in annos_list ) )
def UpperCAmelCase ( A : str , A : str ):
'''simple docstring'''
_UpperCAmelCase = []
_UpperCAmelCase = []
for label_file in glob.glob(os.path.join(A , '*.txt' ) ):
_UpperCAmelCase = label_file.split(os.sep )[-1].rsplit('.' , 1 )[0]
with open(A ) as in_file:
_UpperCAmelCase = in_file.readlines()
_UpperCAmelCase = os.path.join(A , f'{label_name}.jpg' )
_UpperCAmelCase = []
for obj_list in obj_lists:
_UpperCAmelCase = obj_list.rstrip('\n' ).split(' ' )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(A )
labels.append(A )
return img_paths, labels
def UpperCAmelCase ( A : list , A : list , A : int = 1 ):
'''simple docstring'''
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = []
for idx in range(len(A ) ):
_UpperCAmelCase = []
_UpperCAmelCase = img_list[idx]
path_list.append(A )
_UpperCAmelCase = anno_list[idx]
_UpperCAmelCase = cva.imread(A )
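# flip_type 1 mirrors horizontally (x-centers become 1 - x), flip_type 0 mirrors vertically (y-centers become 1 - y)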
if flip_type == 1:
_UpperCAmelCase = cva.flip(A , A )
for bbox in img_annos:
_UpperCAmelCase = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
_UpperCAmelCase = cva.flip(A , A )
for bbox in img_annos:
_UpperCAmelCase = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(A )
new_imgs_list.append(A )
return new_imgs_list, new_annos_lists, path_list
def UpperCAmelCase ( A : int = 32 ):
'''simple docstring'''
assert number_char > 1, "The number of character should greater than 1"
_UpperCAmelCase = ascii_lowercase + digits
return "".join(random.choice(A ) for _ in range(A ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
| 706 |
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class lowercase__ ( nn.Module ):
'''simple docstring'''
def __init__( self , snake_case = 16 , snake_case = 88 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = 32 , snake_case = None , snake_case = False , snake_case = None , snake_case = None , snake_case = "geglu" , snake_case = None , ) -> str:
super().__init__()
_UpperCAmelCase = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=snake_case , attention_head_dim=snake_case , in_channels=snake_case , num_layers=snake_case , dropout=snake_case , norm_num_groups=snake_case , cross_attention_dim=snake_case , attention_bias=snake_case , sample_size=snake_case , num_vector_embeds=snake_case , activation_fn=snake_case , num_embeds_ada_norm=snake_case , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
_UpperCAmelCase = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
_UpperCAmelCase = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
_UpperCAmelCase = [1, 0]
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case=None , snake_case=None , snake_case=None , snake_case = True , ) -> Any:
_UpperCAmelCase = hidden_states
_UpperCAmelCase = []
_UpperCAmelCase = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
_UpperCAmelCase = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
_UpperCAmelCase = self.transformer_index_for_condition[i]
_UpperCAmelCase = self.transformers[transformer_index](
snake_case , encoder_hidden_states=snake_case , timestep=snake_case , cross_attention_kwargs=snake_case , return_dict=snake_case , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
_UpperCAmelCase = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
_UpperCAmelCase = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=snake_case )
| 24 | 0 |
"""simple docstring"""
lowercase = {
'''Pillow''': '''Pillow''',
'''accelerate''': '''accelerate>=0.11.0''',
'''compel''': '''compel==0.1.8''',
'''black''': '''black~=23.1''',
'''datasets''': '''datasets''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.13.2''',
'''requests-mock''': '''requests-mock==1.10.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''invisible-watermark''': '''invisible-watermark''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2''',
'''jaxlib''': '''jaxlib>=0.1.65''',
'''Jinja2''': '''Jinja2''',
'''k-diffusion''': '''k-diffusion>=0.0.12''',
'''torchsde''': '''torchsde''',
'''note_seq''': '''note_seq''',
'''librosa''': '''librosa''',
'''numpy''': '''numpy''',
'''omegaconf''': '''omegaconf''',
'''parameterized''': '''parameterized''',
'''protobuf''': '''protobuf>=3.20.3,<4''',
'''pytest''': '''pytest''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''ruff''': '''ruff>=0.0.241''',
'''safetensors''': '''safetensors''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''scipy''': '''scipy''',
'''onnx''': '''onnx''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''tensorboard''': '''tensorboard''',
'''torch''': '''torch>=1.4''',
'''torchvision''': '''torchvision''',
'''transformers''': '''transformers>=4.25.1''',
'''urllib3''': '''urllib3<=2.0.0''',
}
| 707 |
"""simple docstring"""
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase__ ( A ):
'''simple docstring'''
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(snake_case , 'embed_dim' ) )
self.parent.assertTrue(hasattr(snake_case , 'num_heads' ) )
class lowercase__ :
'''simple docstring'''
def __init__( self , snake_case , snake_case=13 , snake_case=64 , snake_case=3 , snake_case=[16, 48, 96] , snake_case=[1, 3, 6] , snake_case=[1, 2, 10] , snake_case=[7, 3, 3] , snake_case=[4, 2, 2] , snake_case=[2, 1, 1] , snake_case=[2, 2, 2] , snake_case=[False, False, True] , snake_case=[0.0, 0.0, 0.0] , snake_case=0.02 , snake_case=1E-12 , snake_case=True , snake_case=True , snake_case=2 , ) -> Tuple:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_sizes
_UpperCAmelCase = patch_stride
_UpperCAmelCase = patch_padding
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_channels
_UpperCAmelCase = embed_dim
_UpperCAmelCase = num_heads
_UpperCAmelCase = stride_kv
_UpperCAmelCase = depth
_UpperCAmelCase = cls_token
_UpperCAmelCase = attention_drop_rate
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self ) -> List[str]:
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Optional[int]:
_UpperCAmelCase = CvtModel(config=snake_case )
model.to(snake_case )
model.eval()
_UpperCAmelCase = model(snake_case )
_UpperCAmelCase = (self.image_size, self.image_size)
_UpperCAmelCase , _UpperCAmelCase = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
_UpperCAmelCase = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
_UpperCAmelCase = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Optional[Any]:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = CvtForImageClassification(snake_case )
model.to(snake_case )
model.eval()
_UpperCAmelCase = model(snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowercase__ ( A, A, unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
_UpperCAmelCase = (
{'''feature-extraction''': CvtModel, '''image-classification''': CvtForImageClassification}
if is_torch_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = CvtModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=37 )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase_ ( self ) -> Union[str, Any]:
return
@unittest.skip(reason='Cvt does not output attentions' )
def lowerCamelCase_ ( self ) -> str:
pass
@unittest.skip(reason='Cvt does not use inputs_embeds' )
def lowerCamelCase_ ( self ) -> int:
pass
@unittest.skip(reason='Cvt does not support input and output embeddings' )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
pass
def lowerCamelCase_ ( self ) -> Any:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(snake_case )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case )
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def lowerCamelCase_ ( self ) -> Optional[int]:
def check_hidden_states_output(snake_case , snake_case , snake_case ):
_UpperCAmelCase = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(snake_case , snake_case ) )
_UpperCAmelCase = outputs.hidden_states
_UpperCAmelCase = len(self.model_tester.depth )
self.assertEqual(len(snake_case ) , snake_case )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = True
check_hidden_states_output(snake_case , snake_case , snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
check_hidden_states_output(snake_case , snake_case , snake_case )
def lowerCamelCase_ ( self ) -> Any:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCamelCase_ ( self ) -> Dict:
pass
@slow
def lowerCamelCase_ ( self ) -> Dict:
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = CvtModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def UpperCAmelCase ( ):
'''simple docstring'''
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCamelCase_ ( self ) -> List[Any]:
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(snake_case )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=snake_case , return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**snake_case )
# verify the logits
_UpperCAmelCase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , snake_case )
_UpperCAmelCase = torch.tensor([0.9285, 0.9015, -0.3150] ).to(snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1E-4 ) )
| 24 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
lowercase = logging.get_logger(__name__)
lowercase = {
'''Helsinki-NLP/opus-mt-en-de''': '''https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json''',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class lowercase__ ( A ):
'''simple docstring'''
_UpperCAmelCase = '''marian'''
_UpperCAmelCase = ['''past_key_values''']
_UpperCAmelCase = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self , snake_case=58101 , snake_case=None , snake_case=1024 , snake_case=12 , snake_case=4096 , snake_case=16 , snake_case=12 , snake_case=4096 , snake_case=16 , snake_case=0.0 , snake_case=0.0 , snake_case=True , snake_case=True , snake_case="gelu" , snake_case=1024 , snake_case=0.1 , snake_case=0.0 , snake_case=0.0 , snake_case=0.02 , snake_case=58100 , snake_case=False , snake_case=58100 , snake_case=0 , snake_case=0 , snake_case=True , **snake_case , ) -> Dict:
_UpperCAmelCase = vocab_size
_UpperCAmelCase = decoder_vocab_size or vocab_size
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = d_model
_UpperCAmelCase = encoder_ffn_dim
_UpperCAmelCase = encoder_layers
_UpperCAmelCase = encoder_attention_heads
_UpperCAmelCase = decoder_ffn_dim
_UpperCAmelCase = decoder_layers
_UpperCAmelCase = decoder_attention_heads
_UpperCAmelCase = dropout
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = activation_dropout
_UpperCAmelCase = activation_function
_UpperCAmelCase = init_std
_UpperCAmelCase = encoder_layerdrop
_UpperCAmelCase = decoder_layerdrop
_UpperCAmelCase = use_cache
_UpperCAmelCase = encoder_layers
_UpperCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
_UpperCAmelCase = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=snake_case , eos_token_id=snake_case , is_encoder_decoder=snake_case , decoder_start_token_id=snake_case , forced_eos_token_id=snake_case , **snake_case , )
class lowercase__ ( A ):
'''simple docstring'''
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def lowerCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_UpperCAmelCase = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
_UpperCAmelCase = {0: 'batch'}
_UpperCAmelCase = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
_UpperCAmelCase = {0: 'batch', 1: 'decoder_sequence'}
_UpperCAmelCase = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(snake_case , direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
_UpperCAmelCase = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
_UpperCAmelCase , _UpperCAmelCase = self.num_layers
for i in range(snake_case ):
_UpperCAmelCase = {0: 'batch', 2: 'past_sequence + sequence'}
_UpperCAmelCase = {0: 'batch', 2: 'past_sequence + sequence'}
else:
_UpperCAmelCase = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def lowerCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_UpperCAmelCase = super().outputs
else:
_UpperCAmelCase = super(snake_case , self ).outputs
if self.use_past:
_UpperCAmelCase , _UpperCAmelCase = self.num_layers
for i in range(snake_case ):
_UpperCAmelCase = {0: 'batch', 2: 'past_sequence + sequence'}
_UpperCAmelCase = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def lowerCamelCase_ ( self , snake_case , snake_case = -1 , snake_case = -1 , snake_case = False , snake_case = None , ) -> Mapping[str, Any]:
_UpperCAmelCase = self._generate_dummy_inputs_for_encoder_and_decoder(
snake_case , snake_case , snake_case , snake_case , snake_case )
# Generate decoder inputs
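# with past_key_values, the decoder only receives the single newest token per step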
_UpperCAmelCase = seq_length if not self.use_past else 1
_UpperCAmelCase = self._generate_dummy_inputs_for_encoder_and_decoder(
snake_case , snake_case , snake_case , snake_case , snake_case )
_UpperCAmelCase = {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
_UpperCAmelCase = dict(**snake_case , **snake_case )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_UpperCAmelCase , _UpperCAmelCase = common_inputs['input_ids'].shape
_UpperCAmelCase = common_inputs['decoder_input_ids'].shape[1]
_UpperCAmelCase , _UpperCAmelCase = self.num_attention_heads
_UpperCAmelCase = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_UpperCAmelCase = decoder_seq_length + 3
_UpperCAmelCase = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_UpperCAmelCase = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(snake_case , snake_case )] , dim=1 )
_UpperCAmelCase = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_UpperCAmelCase , _UpperCAmelCase = self.num_layers
_UpperCAmelCase = min(snake_case , snake_case )
_UpperCAmelCase = max(snake_case , snake_case ) - min_num_layers
_UpperCAmelCase = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(snake_case ):
common_inputs["past_key_values"].append(
(
torch.zeros(snake_case ),
torch.zeros(snake_case ),
torch.zeros(snake_case ),
torch.zeros(snake_case ),
) )
# TODO: test this.
_UpperCAmelCase = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(snake_case , snake_case ):
common_inputs["past_key_values"].append((torch.zeros(snake_case ), torch.zeros(snake_case )) )
return common_inputs
def lowerCamelCase_ ( self , snake_case , snake_case = -1 , snake_case = -1 , snake_case = False , snake_case = None , ) -> Mapping[str, Any]:
_UpperCAmelCase = self._generate_dummy_inputs_for_encoder_and_decoder(
snake_case , snake_case , snake_case , snake_case , snake_case )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_UpperCAmelCase , _UpperCAmelCase = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
_UpperCAmelCase = seqlen + 2
_UpperCAmelCase , _UpperCAmelCase = self.num_layers
_UpperCAmelCase , _UpperCAmelCase = self.num_attention_heads
_UpperCAmelCase = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_UpperCAmelCase = common_inputs['attention_mask'].dtype
_UpperCAmelCase = torch.cat(
[common_inputs['attention_mask'], torch.ones(snake_case , snake_case , dtype=snake_case )] , dim=1 )
_UpperCAmelCase = [
(torch.zeros(snake_case ), torch.zeros(snake_case )) for _ in range(snake_case )
]
return common_inputs
def lowerCamelCase_ ( self , snake_case , snake_case = -1 , snake_case = -1 , snake_case = False , snake_case = None , ) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_UpperCAmelCase = compute_effective_axis_dimension(
snake_case , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_UpperCAmelCase = tokenizer.num_special_tokens_to_add(snake_case )
_UpperCAmelCase = compute_effective_axis_dimension(
snake_case , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=snake_case )
# Generate dummy inputs according to compute batch and sequence
_UpperCAmelCase = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
_UpperCAmelCase = dict(tokenizer(snake_case , return_tensors=snake_case ) )
return common_inputs
def lowerCamelCase_ ( self , snake_case , snake_case = -1 , snake_case = -1 , snake_case = False , snake_case = None , ) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
_UpperCAmelCase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
snake_case , batch_size=snake_case , seq_length=snake_case , is_pair=snake_case , framework=snake_case )
else:
_UpperCAmelCase = self._generate_dummy_inputs_for_causal_lm(
snake_case , batch_size=snake_case , seq_length=snake_case , is_pair=snake_case , framework=snake_case )
return common_inputs
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case ) -> str:
if self.task in ["default", "seq2seq-lm"]:
_UpperCAmelCase = super()._flatten_past_key_values_(snake_case , snake_case , snake_case , snake_case )
else:
_UpperCAmelCase = super(snake_case , self )._flatten_past_key_values_(
snake_case , snake_case , snake_case , snake_case )
@property
def lowerCamelCase_ ( self ) -> float:
return 1E-4
| 708 |
"""simple docstring"""
from __future__ import annotations
from cmath import sqrt
def UpperCAmelCase ( A : int , A : int , A : int ):
'''simple docstring'''
if a == 0:
raise ValueError('Coefficient \'a\' must not be zero.' )
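# discriminant b*b - 4*a*c; cmath.sqrt handles the negative (complex-root) case as well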
_UpperCAmelCase = b * b - 4 * a * c
_UpperCAmelCase = (-b + sqrt(A )) / (2 * a)
_UpperCAmelCase = (-b - sqrt(A )) / (2 * a)
return (
root_a.real if not root_a.imag else root_a,
root_a.real if not root_a.imag else root_a,
)
def UpperCAmelCase ( ):
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase = quadratic_roots(a=5 , b=6 , c=1 )
print(f'The solutions are: {solutiona} and {solutiona}' )
if __name__ == "__main__":
main()
| 24 | 0 |
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class lowercase__ ( A ):
'''simple docstring'''
def __init__( self , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = False , snake_case = False , snake_case = None , **snake_case , ) -> Tuple:
_UpperCAmelCase = path_or_paths
_UpperCAmelCase = split if split or isinstance(snake_case , snake_case ) else 'train'
_UpperCAmelCase = features
_UpperCAmelCase = cache_dir
_UpperCAmelCase = keep_in_memory
_UpperCAmelCase = streaming
_UpperCAmelCase = num_proc
_UpperCAmelCase = kwargs
@abstractmethod
def lowerCamelCase_ ( self ) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
pass
class lowercase__ ( A ):
'''simple docstring'''
def __init__( self , snake_case = None , snake_case = None , snake_case = False , snake_case = False , snake_case = None , **snake_case , ) -> Tuple:
_UpperCAmelCase = features
_UpperCAmelCase = cache_dir
_UpperCAmelCase = keep_in_memory
_UpperCAmelCase = streaming
_UpperCAmelCase = num_proc
_UpperCAmelCase = kwargs
@abstractmethod
def lowerCamelCase_ ( self ) -> Union[Dataset, IterableDataset]:
pass
| 709 |
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class lowercase__ ( A, unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = BarthezTokenizer
_UpperCAmelCase = BarthezTokenizerFast
_UpperCAmelCase = True
_UpperCAmelCase = True
def lowerCamelCase_ ( self ) -> Optional[int]:
super().setUp()
_UpperCAmelCase = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=snake_case )
_UpperCAmelCase = tokenizer
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = '<pad>'
_UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case ) , snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case ) , snake_case )
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(snake_case ) , 101122 )
def lowerCamelCase_ ( self ) -> List[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 101122 )
@require_torch
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_UpperCAmelCase = [0, 57, 3018, 70307, 91, 2]
_UpperCAmelCase = self.tokenizer(
snake_case , max_length=len(snake_case ) , padding=snake_case , truncation=snake_case , return_tensors='pt' )
self.assertIsInstance(snake_case , snake_case )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
_UpperCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(snake_case , snake_case )
def lowerCamelCase_ ( self ) -> Optional[Any]:
if not self.test_rust_tokenizer:
return
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = 'I was born in 92000, and this is falsé.'
_UpperCAmelCase = tokenizer.tokenize(snake_case )
_UpperCAmelCase = rust_tokenizer.tokenize(snake_case )
self.assertListEqual(snake_case , snake_case )
_UpperCAmelCase = tokenizer.encode(snake_case , add_special_tokens=snake_case )
_UpperCAmelCase = rust_tokenizer.encode(snake_case , add_special_tokens=snake_case )
self.assertListEqual(snake_case , snake_case )
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = tokenizer.encode(snake_case )
_UpperCAmelCase = rust_tokenizer.encode(snake_case )
self.assertListEqual(snake_case , snake_case )
@slow
def lowerCamelCase_ ( self ) -> Optional[int]:
# fmt: off
_UpperCAmelCase = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a French model, so we also use French texts.
_UpperCAmelCase = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=snake_case , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=snake_case , )
| 24 | 0 |
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
'''google/umt5-small''': '''https://huggingface.co/google/umt5-small/resolve/main/config.json''',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class lowercase__ ( A ):
'''simple docstring'''
_UpperCAmelCase = '''umt5'''
_UpperCAmelCase = ['''past_key_values''']
def __init__( self , snake_case=250112 , snake_case=512 , snake_case=64 , snake_case=1024 , snake_case=8 , snake_case=None , snake_case=6 , snake_case=32 , snake_case=128 , snake_case=0.1 , snake_case=1E-6 , snake_case=1.0 , snake_case="gated-gelu" , snake_case=True , snake_case=True , snake_case="T5Tokenizer" , snake_case=True , snake_case=0 , snake_case=1 , snake_case=0 , **snake_case , ) -> List[Any]:
super().__init__(
is_encoder_decoder=snake_case , tokenizer_class=snake_case , tie_word_embeddings=snake_case , pad_token_id=snake_case , eos_token_id=snake_case , decoder_start_token_id=snake_case , **snake_case , )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = d_model
_UpperCAmelCase = d_kv
_UpperCAmelCase = d_ff
_UpperCAmelCase = num_layers
_UpperCAmelCase = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
_UpperCAmelCase = num_heads
_UpperCAmelCase = relative_attention_num_buckets
_UpperCAmelCase = relative_attention_max_distance
_UpperCAmelCase = dropout_rate
_UpperCAmelCase = layer_norm_epsilon
_UpperCAmelCase = initializer_factor
_UpperCAmelCase = feed_forward_proj
_UpperCAmelCase = use_cache
_UpperCAmelCase = self.feed_forward_proj.split('-' )
_UpperCAmelCase = act_info[-1]
_UpperCAmelCase = act_info[0] == 'gated'
if len(snake_case ) > 1 and act_info[0] != "gated" or len(snake_case ) > 2:
raise ValueError(
f'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'' )
if feed_forward_proj == "gated-gelu":
_UpperCAmelCase = 'gelu_new'
@property
def lowerCamelCase_ ( self ) -> Union[str, Any]:
return self.d_model
@property
def lowerCamelCase_ ( self ) -> Tuple:
return self.num_heads
@property
def lowerCamelCase_ ( self ) -> Optional[int]:
return self.num_layers
class lowercase__ ( A ):
'''simple docstring'''
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def lowerCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
_UpperCAmelCase = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
_UpperCAmelCase = 'past_encoder_sequence + sequence'
_UpperCAmelCase = {0: 'batch'}
_UpperCAmelCase = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
_UpperCAmelCase = {0: 'batch', 1: 'decoder_sequence'}
_UpperCAmelCase = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(snake_case , direction='inputs' )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def lowerCamelCase_ ( self ) -> int:
return 13
@property
def lowerCamelCase_ ( self ) -> float:
return 5E-4
| 710 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowercase__ ( A, unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = DiTPipeline
_UpperCAmelCase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
_UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
'''latents''',
'''num_images_per_prompt''',
'''callback''',
'''callback_steps''',
}
_UpperCAmelCase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
_UpperCAmelCase = False
def lowerCamelCase_ ( self ) -> str:
torch.manual_seed(0 )
_UpperCAmelCase = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=snake_case , activation_fn='gelu-approximate' , num_embeds_ada_norm=1000 , norm_type='ada_norm_zero' , norm_elementwise_affine=snake_case , )
_UpperCAmelCase = AutoencoderKL()
_UpperCAmelCase = DDIMScheduler()
_UpperCAmelCase = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
return components
def lowerCamelCase_ ( self , snake_case , snake_case=0 ) -> Optional[Any]:
if str(snake_case ).startswith('mps' ):
_UpperCAmelCase = torch.manual_seed(snake_case )
else:
_UpperCAmelCase = torch.Generator(device=snake_case ).manual_seed(snake_case )
_UpperCAmelCase = {
'class_labels': [1],
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def lowerCamelCase_ ( self ) -> List[Any]:
_UpperCAmelCase = 'cpu'
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = self.pipeline_class(**snake_case )
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
_UpperCAmelCase = self.get_dummy_inputs(snake_case )
_UpperCAmelCase = pipe(**snake_case ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_UpperCAmelCase = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
_UpperCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(snake_case , 1E-3 )
def lowerCamelCase_ ( self ) -> Any:
self._test_inference_batch_single_identical(relax_max_difference=snake_case , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def lowerCamelCase_ ( self ) -> Optional[int]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase_ ( self ) -> int:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
pipe.to('cuda' )
_UpperCAmelCase = ['vase', 'umbrella', 'white shark', 'white wolf']
_UpperCAmelCase = pipe.get_label_ids(snake_case )
_UpperCAmelCase = pipe(snake_case , generator=snake_case , num_inference_steps=40 , output_type='np' ).images
for word, image in zip(snake_case , snake_case ):
_UpperCAmelCase = load_numpy(
f'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy' )
assert np.abs((expected_image - image).max() ) < 1E-2
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
_UpperCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('cuda' )
_UpperCAmelCase = ['vase', 'umbrella']
_UpperCAmelCase = pipe.get_label_ids(snake_case )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = pipe(snake_case , generator=snake_case , num_inference_steps=25 , output_type='np' ).images
for word, image in zip(snake_case , snake_case ):
_UpperCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
f'/dit/{word}_512.npy' )
assert np.abs((expected_image - image).max() ) < 1E-1
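# A minimal end-to-end sketch of the class-conditional DiT pipeline exercised
# above (illustrative; assumes a CUDA device and Hugging Face Hub access):
if __name__ == "__main__":
    pipe = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256').to('cuda')
    class_ids = pipe.get_label_ids(['white shark'])  # ImageNet label names -> class ids
    generator = torch.manual_seed(0)
    images = pipe(class_ids, generator=generator, num_inference_steps=40, output_type='np').images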
| 24 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase__ ( A, unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = UnCLIPImageVariationPipeline
_UpperCAmelCase = IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''guidance_scale'''}
_UpperCAmelCase = IMAGE_VARIATION_BATCH_PARAMS
_UpperCAmelCase = [
'''generator''',
'''return_dict''',
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
_UpperCAmelCase = False
@property
def lowerCamelCase_ ( self ) -> List[Any]:
return 32
@property
def lowerCamelCase_ ( self ) -> Union[str, Any]:
return 32
@property
def lowerCamelCase_ ( self ) -> int:
return self.time_input_dim
@property
def lowerCamelCase_ ( self ) -> Any:
return self.time_input_dim * 4
@property
def lowerCamelCase_ ( self ) -> Tuple:
return 100
@property
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def lowerCamelCase_ ( self ) -> Any:
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(snake_case )
@property
def lowerCamelCase_ ( self ) -> int:
torch.manual_seed(0 )
_UpperCAmelCase = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(snake_case )
@property
def lowerCamelCase_ ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
_UpperCAmelCase = {
'clip_embeddings_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'cross_attention_dim': self.cross_attention_dim,
}
_UpperCAmelCase = UnCLIPTextProjModel(**snake_case )
return model
@property
def lowerCamelCase_ ( self ) -> Tuple:
torch.manual_seed(0 )
_UpperCAmelCase = {
'sample_size': 32,
# RGB in channels
'in_channels': 3,
# Out channels is double in channels because predicts mean and variance
'out_channels': 6,
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': 'identity',
}
_UpperCAmelCase = UNetaDConditionModel(**snake_case )
return model
@property
def lowerCamelCase_ ( self ) -> Tuple:
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def lowerCamelCase_ ( self ) -> List[str]:
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def lowerCamelCase_ ( self ) -> List[str]:
# seeded differently to get different unet than `self.dummy_super_res_first`
torch.manual_seed(1 )
_UpperCAmelCase = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = self.dummy_decoder
_UpperCAmelCase = self.dummy_text_proj
_UpperCAmelCase = self.dummy_text_encoder
_UpperCAmelCase = self.dummy_tokenizer
_UpperCAmelCase = self.dummy_super_res_first
_UpperCAmelCase = self.dummy_super_res_last
_UpperCAmelCase = UnCLIPScheduler(
variance_type='learned_range' , prediction_type='epsilon' , num_train_timesteps=1000 , )
_UpperCAmelCase = UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='epsilon' , num_train_timesteps=1000 , )
_UpperCAmelCase = CLIPImageProcessor(crop_size=32 , size=32 )
_UpperCAmelCase = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def lowerCamelCase_ ( self , snake_case , snake_case=0 , snake_case=True ) -> int:
_UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(snake_case ) ).to(snake_case )
if str(snake_case ).startswith('mps' ):
_UpperCAmelCase = torch.manual_seed(snake_case )
else:
_UpperCAmelCase = torch.Generator(device=snake_case ).manual_seed(snake_case )
if pil_image:
_UpperCAmelCase = input_image * 0.5 + 0.5
_UpperCAmelCase = input_image.clamp(0 , 1 )
_UpperCAmelCase = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
_UpperCAmelCase = DiffusionPipeline.numpy_to_pil(snake_case )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def lowerCamelCase_ ( self ) -> Any:
_UpperCAmelCase = 'cpu'
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = self.pipeline_class(**snake_case )
_UpperCAmelCase = pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
_UpperCAmelCase = self.get_dummy_inputs(snake_case , pil_image=snake_case )
_UpperCAmelCase = pipe(**snake_case )
_UpperCAmelCase = output.images
_UpperCAmelCase = self.get_dummy_inputs(snake_case , pil_image=snake_case )
_UpperCAmelCase = pipe(
**snake_case , return_dict=snake_case , )[0]
_UpperCAmelCase = image[0, -3:, -3:, -1]
_UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase = np.array(
[
0.9997,
0.0002,
0.9997,
0.9997,
0.9969,
0.0023,
0.9997,
0.9969,
0.9970,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = 'cpu'
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = self.pipeline_class(**snake_case )
_UpperCAmelCase = pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
_UpperCAmelCase = self.get_dummy_inputs(snake_case , pil_image=snake_case )
_UpperCAmelCase = pipe(**snake_case )
_UpperCAmelCase = output.images
_UpperCAmelCase = self.get_dummy_inputs(snake_case , pil_image=snake_case )
_UpperCAmelCase = pipe(
**snake_case , return_dict=snake_case , )[0]
_UpperCAmelCase = image[0, -3:, -3:, -1]
_UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase_ ( self ) -> Any:
_UpperCAmelCase = 'cpu'
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = self.pipeline_class(**snake_case )
_UpperCAmelCase = pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
_UpperCAmelCase = self.get_dummy_inputs(snake_case , pil_image=snake_case )
_UpperCAmelCase = [
pipeline_inputs['image'],
pipeline_inputs['image'],
]
_UpperCAmelCase = pipe(**snake_case )
_UpperCAmelCase = output.images
_UpperCAmelCase = self.get_dummy_inputs(snake_case , pil_image=snake_case )
_UpperCAmelCase = [
tuple_pipeline_inputs['image'],
tuple_pipeline_inputs['image'],
]
_UpperCAmelCase = pipe(
**snake_case , return_dict=snake_case , )[0]
_UpperCAmelCase = image[0, -3:, -3:, -1]
_UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
_UpperCAmelCase = np.array(
[
0.9997,
0.9989,
0.0008,
0.0021,
0.9960,
0.0018,
0.0014,
0.0002,
0.9933,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = torch.device('cpu' )
        class DummyScheduler:
            '''simple docstring'''
            init_noise_sigma = 1
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = self.pipeline_class(**snake_case )
_UpperCAmelCase = pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
_UpperCAmelCase = torch.Generator(device=snake_case ).manual_seed(0 )
_UpperCAmelCase = pipe.decoder.dtype
_UpperCAmelCase = 1
_UpperCAmelCase = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
_UpperCAmelCase = pipe.prepare_latents(
snake_case , dtype=snake_case , device=snake_case , generator=snake_case , latents=snake_case , scheduler=DummyScheduler() )
_UpperCAmelCase = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
_UpperCAmelCase = pipe.prepare_latents(
snake_case , dtype=snake_case , device=snake_case , generator=snake_case , latents=snake_case , scheduler=DummyScheduler() )
_UpperCAmelCase = self.get_dummy_inputs(snake_case , pil_image=snake_case )
_UpperCAmelCase = pipe(
**snake_case , decoder_latents=snake_case , super_res_latents=snake_case ).images
_UpperCAmelCase = self.get_dummy_inputs(snake_case , pil_image=snake_case )
# Don't pass image, instead pass embedding
_UpperCAmelCase = pipeline_inputs.pop('image' )
_UpperCAmelCase = pipe.image_encoder(snake_case ).image_embeds
_UpperCAmelCase = pipe(
**snake_case , decoder_latents=snake_case , super_res_latents=snake_case , image_embeddings=snake_case , ).images
        # make sure passing image embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1E-4
@skip_mps
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = torch_device == 'cpu'
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
_UpperCAmelCase = 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=snake_case , expected_max_diff=snake_case )
@skip_mps
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = torch_device == 'cpu'
_UpperCAmelCase = True
_UpperCAmelCase = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
self._test_inference_batch_single_identical(
test_max_difference=snake_case , relax_max_difference=snake_case , additional_params_copy_to_batched_inputs=snake_case , )
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
_UpperCAmelCase = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=snake_case , additional_params_copy_to_batched_inputs=snake_case , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=snake_case )
@skip_mps
def lowerCamelCase_ ( self ) -> int:
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def lowerCamelCase_ ( self ) -> List[Any]:
return super().test_save_load_local()
@skip_mps
def lowerCamelCase_ ( self ) -> List[Any]:
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase_ ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self ) -> Any:
_UpperCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png' )
_UpperCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/unclip/karlo_v1_alpha_cat_variation_fp16.npy' )
_UpperCAmelCase = UnCLIPImageVariationPipeline.from_pretrained(
'kakaobrain/karlo-v1-alpha-image-variations' , torch_dtype=torch.floataa )
_UpperCAmelCase = pipeline.to(snake_case )
pipeline.set_progress_bar_config(disable=snake_case )
_UpperCAmelCase = torch.Generator(device='cpu' ).manual_seed(0 )
_UpperCAmelCase = pipeline(
snake_case , generator=snake_case , output_type='np' , )
_UpperCAmelCase = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(snake_case , snake_case , 15 )
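# A minimal sketch of the image-variation call covered by the slow test above
# (illustrative; assumes a GPU with fp16 support and Hub access):
if __name__ == "__main__":
    pipeline = UnCLIPImageVariationPipeline.from_pretrained(
        'kakaobrain/karlo-v1-alpha-image-variations', torch_dtype=torch.float16).to('cuda')
    image = load_image(
        'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png')
    generator = torch.Generator(device='cpu').manual_seed(0)
    variation = pipeline(image, generator=generator, output_type='np').images[0]  # (256, 256, 3)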
| 711 |
"""simple docstring"""
def UpperCAmelCase ( A : int ):
'''simple docstring'''
_UpperCAmelCase = abs(A )
_UpperCAmelCase = 0
while n > 0:
res += n % 10
n //= 10
return res
def UpperCAmelCase ( A : int ):
'''simple docstring'''
_UpperCAmelCase = abs(A )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def UpperCAmelCase ( A : int ):
'''simple docstring'''
return sum(int(A ) for c in str(abs(A ) ) )
def UpperCAmelCase ( ):
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(A : Callable , A : int ) -> None:
_UpperCAmelCase = f'{func.__name__}({value})'
_UpperCAmelCase = timeit(f'__main__.{call}' , setup='import __main__' )
print(f'{call:56} = {func(A )} -- {timing:.4f} seconds' )
for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(A , A )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
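    # Quick sanity check (illustrative; uses the call-site names from the benchmark above):
    assert sum_of_digits(-12345) == sum_of_digits_recursion(-12345) == sum_of_digits_compact(-12345) == 15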
| 24 | 0 |
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase ( A : list[int] , A : int ):
'''simple docstring'''
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = 0
_UpperCAmelCase = sum(A )
create_state_space_tree(A , A , A , A , A , A )
return result
def UpperCAmelCase ( A : list[int] , A : int , A : int , A : list[int] , A : list[list[int]] , A : int , ):
'''simple docstring'''
if sum(A ) > max_sum or (remaining_nums_sum + sum(A )) < max_sum:
return
if sum(A ) == max_sum:
result.append(A )
return
for index in range(A , len(A ) ):
create_state_space_tree(
A , A , index + 1 , [*path, nums[index]] , A , remaining_nums_sum - nums[index] , )
lowercase = [3, 34, 4, 12, 5, 2]
lowercase = 9
lowercase = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
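# For the inputs above, the depth-first search finds exactly the subsets of
# [3, 34, 4, 12, 5, 2] that sum to 9, in visit order:
assert result == [[3, 4, 2], [4, 5]]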
| 712 |
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase ( A : int , A : int ):
'''simple docstring'''
_UpperCAmelCase = []
create_all_state(1 , A , A , [] , A )
return result
def UpperCAmelCase ( A : int , A : int , A : int , A : list[int] , A : list[list[int]] , ):
'''simple docstring'''
if level == 0:
total_list.append(current_list[:] )
return
for i in range(A , total_number - level + 2 ):
current_list.append(A )
create_all_state(i + 1 , A , level - 1 , A , A )
current_list.pop()
def UpperCAmelCase ( A : list[list[int]] ):
'''simple docstring'''
for i in total_list:
print(*A )
if __name__ == "__main__":
lowercase = 4
lowercase = 2
lowercase = generate_all_combinations(n, k)
print_all_state(total_list)
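    # Sanity check (illustrative): C(4, 2) = 6 combinations in lexicographic order.
    assert total_list == [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]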
| 24 | 0 |
"""simple docstring"""
lowercase = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
lowercase = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
lowercase = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 713 |
"""simple docstring"""
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
lowercase = logging.getLogger()
def UpperCAmelCase ( A : Path , A : list ):
'''simple docstring'''
_UpperCAmelCase = '\n'.join(A )
Path(A ).open('w' ).writelines(A )
lowercase = '''patrickvonplaten/t5-tiny-random'''
lowercase = '''sshleifer/bart-tiny-random'''
lowercase = '''sshleifer/tiny-mbart'''
lowercase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class lowercase__ ( A ):
'''simple docstring'''
def lowerCamelCase_ ( self , snake_case ) -> str:
_UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
_UpperCAmelCase = input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
_UpperCAmelCase = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.']
_dump_articles(snake_case , snake_case )
_UpperCAmelCase = str(Path(self.get_auto_remove_tmp_dir() ) / 'scores.json' )
_UpperCAmelCase = 'translation_en_to_de' if model == T5_TINY else 'summarization'
_UpperCAmelCase = f'\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n '.split()
with patch.object(snake_case , 'argv' , snake_case ):
run_generate()
assert Path(snake_case ).exists()
# os.remove(Path(output_file_name))
def lowerCamelCase_ ( self ) -> str:
self.run_eval_tester(snake_case )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def lowerCamelCase_ ( self , snake_case ) -> List[Any]:
self.run_eval_tester(snake_case )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def lowerCamelCase_ ( self , snake_case ) -> Dict:
_UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
_UpperCAmelCase = input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
_UpperCAmelCase = {
'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'],
'de': [
                'Maschinelles Lernen ist großartig, oder?',
'Ich esse gerne Bananen',
'Morgen ist wieder ein toller Tag!',
],
}
_UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() )
_UpperCAmelCase = str(tmp_dir / 'scores.json' )
_UpperCAmelCase = str(tmp_dir / 'val.target' )
_dump_articles(snake_case , text['en'] )
_dump_articles(snake_case , text['de'] )
_UpperCAmelCase = 'translation_en_to_de' if model == T5_TINY else 'summarization'
_UpperCAmelCase = f'\n run_eval_search.py\n {model}\n {str(snake_case )}\n {str(snake_case )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n '.split()
testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'] )
with patch.object(snake_case , 'argv' , snake_case ):
with CaptureStdout() as cs:
run_search()
_UpperCAmelCase = [' num_beams | length_penalty', model, 'Best score args']
_UpperCAmelCase = ['Info']
if "translation" in task:
expected_strings.append('bleu' )
else:
expected_strings.extend(snake_case )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(snake_case ).exists()
os.remove(Path(snake_case ) )
| 24 | 0 |
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
lowercase = logging.getLogger(__name__)
if __name__ == "__main__":
lowercase = argparse.ArgumentParser(
description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
)
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
parser.add_argument('''--vocab_size''', default=3_05_22, type=int)
lowercase = parser.parse_args()
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file, '''rb''') as fp:
lowercase = pickle.load(fp)
logger.info('''Counting occurrences for MLM.''')
lowercase = Counter()
for tk_ids in data:
counter.update(tk_ids)
lowercase = [0] * args.vocab_size
for k, v in counter.items():
lowercase = v
logger.info(F'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, '''wb''') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
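    # Toy illustration of the counting step above (hypothetical ids, vocab size 10):
    toy_data = [[5, 7, 5], [7, 9]]  # two binarized token-id sequences
    toy_counter = Counter()
    for tk_ids in toy_data:
        toy_counter.update(tk_ids)
    toy_counts = [0] * 10
    for k, v in toy_counter.items():
        toy_counts[k] = v
    assert toy_counts[5] == 2 and toy_counts[7] == 2 and toy_counts[9] == 1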
| 714 |
"""simple docstring"""
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
lowercase = logging.get_logger(__name__)
lowercase = TypeVar('''DatasetType''', Dataset, IterableDataset)
def UpperCAmelCase ( A : List[DatasetType] , A : Optional[List[float]] = None , A : Optional[int] = None , A : Optional[DatasetInfo] = None , A : Optional[NamedSplit] = None , A : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ):
'''simple docstring'''
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('Unable to interleave an empty list of datasets.' )
for i, dataset in enumerate(A ):
if not isinstance(A , (Dataset, IterableDataset) ):
if isinstance(A , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
'is an empty dataset dictionary.' )
raise ValueError(
f'Dataset at position {i} has at least one split: {list(A )}\n'
f'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(A ) )}\']' )
raise ValueError(
f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.' )
if i == 0:
_UpperCAmelCase , _UpperCAmelCase = (
(Dataset, IterableDataset) if isinstance(A , A ) else (IterableDataset, Dataset)
)
elif not isinstance(A , A ):
raise ValueError(
f'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(f'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
A , A , A , info=A , split=A , stopping_strategy=A )
else:
return _interleave_iterable_datasets(
A , A , A , info=A , split=A , stopping_strategy=A )
def UpperCAmelCase ( A : List[DatasetType] , A : Optional[DatasetInfo] = None , A : Optional[NamedSplit] = None , A : int = 0 , ):
'''simple docstring'''
if not dsets:
raise ValueError('Unable to concatenate an empty list of datasets.' )
for i, dataset in enumerate(A ):
if not isinstance(A , (Dataset, IterableDataset) ):
if isinstance(A , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
'is an empty dataset dictionary.' )
raise ValueError(
f'Dataset at position {i} has at least one split: {list(A )}\n'
f'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(A ) )}\']' )
raise ValueError(
f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.' )
if i == 0:
_UpperCAmelCase , _UpperCAmelCase = (
(Dataset, IterableDataset) if isinstance(A , A ) else (IterableDataset, Dataset)
)
elif not isinstance(A , A ):
raise ValueError(
f'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(A , info=A , split=A , axis=A )
else:
return _concatenate_iterable_datasets(A , info=A , split=A , axis=A )
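def _interleave_usage_example():
    # A minimal usage sketch (illustrative helper, not part of the public API;
    # assumes the installed `datasets` package exposes `interleave_datasets`):
    from datasets import Dataset, interleave_datasets
    da = Dataset.from_dict({'x': [0, 1, 2]})
    db = Dataset.from_dict({'x': [10, 11, 12]})
    mixed = interleave_datasets([da, db], probabilities=[0.5, 0.5], seed=42, stopping_strategy='first_exhausted')
    return mixed['x']  # rows drawn from both datasets until one is exhausted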
| 24 | 0 |
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def UpperCAmelCase ( A : float , A : float , A : bool = False ):
'''simple docstring'''
if radian_mode:
return [magnitude * cos(A ), magnitude * sin(A )]
return [magnitude * cos(radians(A ) ), magnitude * sin(radians(A ) )]
def UpperCAmelCase ( A : NDArray[floataa] , A : NDArray[floataa] , A : float = 10**-1 ):
'''simple docstring'''
_UpperCAmelCase = cross(A , A )
_UpperCAmelCase = sum(A )
return abs(A ) < eps
if __name__ == "__main__":
# Test to check if it works
lowercase = array(
[
polar_force(718.4, 1_80 - 30),
polar_force(879.54, 45),
polar_force(1_00, -90),
]
)
lowercase = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
lowercase = array(
[
polar_force(30 * 9.81, 15),
polar_force(2_15, 1_80 - 45),
polar_force(2_64, 90 - 30),
]
)
lowercase = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
lowercase = array([[0, -20_00], [0, -12_00], [0, 1_56_00], [0, -1_24_00]])
lowercase = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
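    # Worked example (illustrative): a 10 N force at 90 degrees decomposes into
    # components of approximately [0, 10].
    fx, fy = polar_force(10.0, 90)
    assert abs(fx) < 1E-9 and abs(fy - 10.0) < 1E-9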
| 715 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_UpperCAmelCase = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Dict:
_UpperCAmelCase = TextaTextGenerationPipeline(model=snake_case , tokenizer=snake_case )
return generator, ["Something to write", "Something else"]
def lowerCamelCase_ ( self , snake_case , snake_case ) -> Dict:
_UpperCAmelCase = generator('Something there' )
self.assertEqual(snake_case , [{'generated_text': ANY(snake_case )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]['generated_text'].startswith('Something there' ) )
_UpperCAmelCase = generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=snake_case )
self.assertEqual(
snake_case , [
[{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}],
[{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}],
] , )
_UpperCAmelCase = generator(
['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=snake_case )
self.assertEqual(
snake_case , [
[{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}],
[{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}],
] , )
with self.assertRaises(snake_case ):
generator(4 )
@require_torch
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='pt' )
# do_sample=False necessary for reproducibility
_UpperCAmelCase = generator('Something there' , do_sample=snake_case )
self.assertEqual(snake_case , [{'generated_text': ''}] )
_UpperCAmelCase = 3
_UpperCAmelCase = generator(
'Something there' , num_return_sequences=snake_case , num_beams=snake_case , )
_UpperCAmelCase = [
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': ''},
]
self.assertEqual(snake_case , snake_case )
_UpperCAmelCase = generator('This is a test' , do_sample=snake_case , num_return_sequences=2 , return_tensors=snake_case )
self.assertEqual(
snake_case , [
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
] , )
_UpperCAmelCase = generator.model.config.eos_token_id
_UpperCAmelCase = '<pad>'
_UpperCAmelCase = generator(
['This is a test', 'This is a second test'] , do_sample=snake_case , num_return_sequences=2 , batch_size=2 , return_tensors=snake_case , )
self.assertEqual(
snake_case , [
[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
],
[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
],
] , )
@require_tf
def lowerCamelCase_ ( self ) -> Any:
_UpperCAmelCase = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='tf' )
# do_sample=False necessary for reproducibility
_UpperCAmelCase = generator('Something there' , do_sample=snake_case )
self.assertEqual(snake_case , [{'generated_text': ''}] )
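# A minimal sketch of the pipeline under test (illustrative; assumes Hub access
# and uses a real checkpoint, since tiny random models emit unstable text):
if __name__ == "__main__":
    generator = pipeline('text2text-generation', model='t5-small')
    print(generator('translate English to German: How are you?', do_sample=False))
    # e.g. [{'generated_text': 'Wie geht es Ihnen?'}]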
| 24 | 0 |
"""simple docstring"""
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def UpperCAmelCase ( A : List[Any] ):
'''simple docstring'''
_UpperCAmelCase = botoa.client('iam' )
_UpperCAmelCase = {
'Version': '2012-10-17',
'Statement': [
{'Effect': 'Allow', 'Principal': {'Service': 'sagemaker.amazonaws.com'}, 'Action': 'sts:AssumeRole'}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=A , AssumeRolePolicyDocument=json.dumps(A , indent=2 ) )
_UpperCAmelCase = {
'Version': '2012-10-17',
'Statement': [
{
'Effect': 'Allow',
'Action': [
'sagemaker:*',
'ecr:GetDownloadUrlForLayer',
'ecr:BatchGetImage',
'ecr:BatchCheckLayerAvailability',
'ecr:GetAuthorizationToken',
'cloudwatch:PutMetricData',
'cloudwatch:GetMetricData',
'cloudwatch:GetMetricStatistics',
'cloudwatch:ListMetrics',
'logs:CreateLogGroup',
'logs:CreateLogStream',
'logs:DescribeLogStreams',
'logs:PutLogEvents',
'logs:GetLogEvents',
's3:CreateBucket',
's3:ListBucket',
's3:GetBucketLocation',
's3:GetObject',
's3:PutObject',
],
'Resource': '*',
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=A , PolicyName=f'{role_name}_policy_permission' , PolicyDocument=json.dumps(A , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(f'role {role_name} already exists. Using existing one' )
def UpperCAmelCase ( A : int ):
'''simple docstring'''
_UpperCAmelCase = botoa.client('iam' )
return iam_client.get_role(RoleName=A )["Role"]["Arn"]
def UpperCAmelCase ( ):
'''simple docstring'''
_UpperCAmelCase = _ask_options(
'How do you want to authorize?' , ['AWS Profile', 'Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) '] , A , )
_UpperCAmelCase = None
if credentials_configuration == 0:
_UpperCAmelCase = _ask_field('Enter your AWS Profile name: [default] ' , default='default' )
_UpperCAmelCase = aws_profile
else:
print(
            'Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch your training script with,'
'`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`' )
_UpperCAmelCase = _ask_field('AWS Access Key ID: ' )
_UpperCAmelCase = aws_access_key_id
_UpperCAmelCase = _ask_field('AWS Secret Access Key: ' )
_UpperCAmelCase = aws_secret_access_key
_UpperCAmelCase = _ask_field('Enter your AWS Region: [us-east-1]' , default='us-east-1' )
_UpperCAmelCase = aws_region
_UpperCAmelCase = _ask_options(
'Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?' , ['Provide IAM Role name', 'Create new IAM role using credentials'] , A , )
if role_management == 0:
_UpperCAmelCase = _ask_field('Enter your IAM role name: ' )
else:
_UpperCAmelCase = 'accelerate_sagemaker_execution_role'
print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials' )
_create_iam_role_for_sagemaker(A )
_UpperCAmelCase = _ask_field(
'Do you want to use custom Docker image? [yes/NO]: ' , _convert_yes_no_to_bool , default=A , error_message='Please enter yes or no.' , )
_UpperCAmelCase = None
if is_custom_docker_image:
_UpperCAmelCase = _ask_field('Enter your Docker image: ' , lambda A : str(A ).lower() )
_UpperCAmelCase = _ask_field(
'Do you want to provide SageMaker input channels with data locations? [yes/NO]: ' , _convert_yes_no_to_bool , default=A , error_message='Please enter yes or no.' , )
_UpperCAmelCase = None
if is_sagemaker_inputs_enabled:
_UpperCAmelCase = _ask_field(
'Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ' , lambda A : str(A ).lower() , )
_UpperCAmelCase = _ask_field(
'Do you want to enable SageMaker metrics? [yes/NO]: ' , _convert_yes_no_to_bool , default=A , error_message='Please enter yes or no.' , )
_UpperCAmelCase = None
if is_sagemaker_metrics_enabled:
_UpperCAmelCase = _ask_field(
'Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ' , lambda A : str(A ).lower() , )
_UpperCAmelCase = _ask_options(
'What is the distributed mode?' , ['No distributed training', 'Data parallelism'] , _convert_sagemaker_distributed_mode , )
_UpperCAmelCase = {}
_UpperCAmelCase = _ask_field(
'Do you wish to optimize your script with torch dynamo?[yes/NO]:' , _convert_yes_no_to_bool , default=A , error_message='Please enter yes or no.' , )
if use_dynamo:
_UpperCAmelCase = 'dynamo_'
_UpperCAmelCase = _ask_options(
'Which dynamo backend would you like to use?' , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
_UpperCAmelCase = _ask_field(
'Do you want to customize the defaults sent to torch.compile? [yes/NO]: ' , _convert_yes_no_to_bool , default=A , error_message='Please enter yes or no.' , )
if use_custom_options:
_UpperCAmelCase = _ask_options(
'Which mode do you want to use?' , A , lambda A : TORCH_DYNAMO_MODES[int(A )] , default='default' , )
_UpperCAmelCase = _ask_field(
'Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ' , _convert_yes_no_to_bool , default=A , error_message='Please enter yes or no.' , )
_UpperCAmelCase = _ask_field(
'Do you want to enable dynamic shape tracing? [yes/NO]: ' , _convert_yes_no_to_bool , default=A , error_message='Please enter yes or no.' , )
    _UpperCAmelCase = 'Which EC2 instance type do you want to use for your training?'
if distributed_type != SageMakerDistributedType.NO:
_UpperCAmelCase = _ask_options(
A , A , lambda A : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(A )] )
else:
        eca_instance_query += " [ml.p3.2xlarge]:"
_UpperCAmelCase = _ask_field(A , lambda A : str(A ).lower() , default='ml.p3.2xlarge' )
_UpperCAmelCase = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
_UpperCAmelCase = _ask_field(
'How many machines do you want use? [1]: ' , A , default=1 , )
_UpperCAmelCase = _ask_options(
'Do you wish to use FP16 or BF16 (mixed precision)?' , ['no', 'fp16', 'bf16', 'fp8'] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
'Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.' )
return SageMakerConfig(
image_uri=A , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=A , use_cpu=A , dynamo_config=A , eca_instance_type=A , profile=A , region=A , iam_role_name=A , mixed_precision=A , num_machines=A , sagemaker_inputs_file=A , sagemaker_metrics_file=A , )
| 716 |
"""simple docstring"""
def UpperCAmelCase ( A : int ):
'''simple docstring'''
_UpperCAmelCase = [[0 for _ in range(A )] for _ in range(m + 1 )]
for i in range(m + 1 ):
_UpperCAmelCase = 1
for n in range(m + 1 ):
for k in range(1 , A ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
lowercase = int(input('''Enter a number: ''').strip())
print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
lowercase = int(sys.argv[1])
print(partition(n))
except ValueError:
print('''Please pass a number.''')
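# Sanity check (illustrative): the memo table counts integer partitions, e.g. p(7) = 15.
assert partition(7) == 15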
| 24 | 0 |
"""simple docstring"""
import os
lowercase = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 1_00, '''D''': 5_00, '''M''': 10_00}
def UpperCAmelCase ( A : str ):
'''simple docstring'''
_UpperCAmelCase = 0
_UpperCAmelCase = 0
while index < len(A ) - 1:
_UpperCAmelCase = SYMBOLS[numerals[index]]
_UpperCAmelCase = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def UpperCAmelCase ( A : int ):
'''simple docstring'''
_UpperCAmelCase = ''
_UpperCAmelCase = num // 1000
numerals += m_count * "M"
num %= 1000
_UpperCAmelCase = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
_UpperCAmelCase = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def UpperCAmelCase ( A : str = "/p089_roman.txt" ):
'''simple docstring'''
_UpperCAmelCase = 0
with open(os.path.dirname(A ) + roman_numerals_filename ) as filea:
_UpperCAmelCase = filea.readlines()
for line in lines:
_UpperCAmelCase = line.strip()
_UpperCAmelCase = parse_roman_numerals(A )
_UpperCAmelCase = generate_roman_numerals(A )
savings += len(A ) - len(A )
return savings
if __name__ == "__main__":
print(F'''{solution() = }''')
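    # Round-trip example (illustrative): a non-minimal numeral shrinks when rewritten.
    assert parse_roman_numerals('XIIIIII') == 16
    assert generate_roman_numerals(16) == 'XVI'  # 4 characters saved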
| 717 |
"""simple docstring"""
import os
lowercase = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 1_00, '''D''': 5_00, '''M''': 10_00}
def UpperCAmelCase ( A : str ):
'''simple docstring'''
_UpperCAmelCase = 0
_UpperCAmelCase = 0
while index < len(A ) - 1:
_UpperCAmelCase = SYMBOLS[numerals[index]]
_UpperCAmelCase = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def UpperCAmelCase ( A : int ):
'''simple docstring'''
_UpperCAmelCase = ''
_UpperCAmelCase = num // 1000
numerals += m_count * "M"
num %= 1000
_UpperCAmelCase = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
_UpperCAmelCase = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def UpperCAmelCase ( A : str = "/p089_roman.txt" ):
'''simple docstring'''
_UpperCAmelCase = 0
with open(os.path.dirname(A ) + roman_numerals_filename ) as filea:
_UpperCAmelCase = filea.readlines()
for line in lines:
_UpperCAmelCase = line.strip()
_UpperCAmelCase = parse_roman_numerals(A )
_UpperCAmelCase = generate_roman_numerals(A )
savings += len(A ) - len(A )
return savings
if __name__ == "__main__":
print(F'''{solution() = }''')
| 24 | 0 |
"""simple docstring"""
from random import randint, random
def UpperCAmelCase ( A : int , A : int , A : int , A : bool = False , A : bool = False , A : int = 5 , ):
'''simple docstring'''
_UpperCAmelCase = [[-1] * number_of_cells] # Create a highway without any car
_UpperCAmelCase = 0
_UpperCAmelCase = max(A , 0 )
while i < number_of_cells:
_UpperCAmelCase = (
randint(0 , A ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def UpperCAmelCase ( A : list , A : int ):
'''simple docstring'''
_UpperCAmelCase = 0
_UpperCAmelCase = highway_now[car_index + 1 :]
for cell in range(len(A ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# Here if the car is near the end of the highway
return distance + get_distance(A , -1 )
def UpperCAmelCase ( A : list , A : float , A : int ):
'''simple docstring'''
_UpperCAmelCase = len(A )
    # Before calculations, the highway is empty
_UpperCAmelCase = [-1] * number_of_cells
for car_index in range(A ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
_UpperCAmelCase = min(highway_now[car_index] + 1 , A )
            # Number of empty cells before the next car
_UpperCAmelCase = get_distance(A , A ) - 1
# We can't have the car causing an accident
_UpperCAmelCase = min(next_highway[car_index] , A )
if random() < probability:
# Randomly, a driver will slow down
_UpperCAmelCase = max(next_highway[car_index] - 1 , 0 )
return next_highway
def UpperCAmelCase ( A : list , A : int , A : float , A : int ):
'''simple docstring'''
_UpperCAmelCase = len(highway[0] )
for i in range(A ):
_UpperCAmelCase = update(highway[i] , A , A )
_UpperCAmelCase = [-1] * number_of_cells
for car_index in range(A ):
_UpperCAmelCase = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
_UpperCAmelCase = (car_index + speed) % number_of_cells
# Commit the change of position
_UpperCAmelCase = speed
highway.append(A )
return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
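    # A small deterministic run (illustrative; assumes the de-obfuscated upstream
    # names construct_highway and simulate for the two helpers above):
    highway = construct_highway(6, 3, 0)  # 6 cells, a car every 3 cells, speed 0
    states = simulate(highway, 2, 0.0, 2)  # 2 updates, braking probability 0, max speed 2
    assert states[-1] == [-1, -1, 1, -1, -1, 1]  # both cars advanced two cells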
| 718 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = {
'task_specific_params': {
'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4},
'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4},
'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6},
}
}
_UpperCAmelCase = {
'task_specific_params.summarization.length_penalty': 1.0,
'task_specific_params.summarization.max_length': 128,
'task_specific_params.summarization.min_length': 12,
'task_specific_params.summarization.num_beams': 4,
'task_specific_params.summarization_cnn.length_penalty': 2.0,
'task_specific_params.summarization_cnn.max_length': 142,
'task_specific_params.summarization_cnn.min_length': 56,
'task_specific_params.summarization_cnn.num_beams': 4,
'task_specific_params.summarization_xsum.length_penalty': 1.0,
'task_specific_params.summarization_xsum.max_length': 62,
'task_specific_params.summarization_xsum.min_length': 11,
'task_specific_params.summarization_xsum.num_beams': 6,
}
self.assertEqual(flatten_dict(snake_case ) , snake_case )
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(snake_case ) , x.transpose() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(transpose(snake_case ) , transpose(snake_case ).numpy() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , transpose(snake_case , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(transpose(snake_case ) , transpose(snake_case ).numpy() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , transpose(snake_case , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(transpose(snake_case ) , np.asarray(transpose(snake_case ) ) ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , np.asarray(transpose(snake_case , axes=(1, 2, 0) ) ) ) )
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , np.reshape(snake_case , (4, 3) ) ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , np.reshape(snake_case , (12, 5) ) ) )
@require_torch
def lowerCamelCase_ ( self ) -> Optional[Any]:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , reshape(snake_case , (4, 3) ).numpy() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , reshape(snake_case , (12, 5) ).numpy() ) )
@require_tf
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , reshape(snake_case , (4, 3) ).numpy() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , reshape(snake_case , (12, 5) ).numpy() ) )
@require_flax
def lowerCamelCase_ ( self ) -> Tuple:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , np.asarray(reshape(snake_case , (4, 3) ) ) ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , np.asarray(reshape(snake_case , (12, 5) ) ) ) )
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(snake_case ) , np.squeeze(snake_case ) ) )
_UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , np.squeeze(snake_case , axis=2 ) ) )
@require_torch
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = np.random.randn(1 , 3 , 4 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(squeeze(snake_case ) , squeeze(snake_case ).numpy() ) )
_UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , squeeze(snake_case , axis=2 ).numpy() ) )
@require_tf
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = np.random.randn(1 , 3 , 4 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(squeeze(snake_case ) , squeeze(snake_case ).numpy() ) )
_UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , squeeze(snake_case , axis=2 ).numpy() ) )
@require_flax
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = np.random.randn(1 , 3 , 4 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(squeeze(snake_case ) , np.asarray(squeeze(snake_case ) ) ) )
_UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , np.asarray(squeeze(snake_case , axis=2 ) ) ) )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , np.expand_dims(snake_case , axis=1 ) ) )
@require_torch
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , expand_dims(snake_case , axis=1 ).numpy() ) )
@require_tf
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , expand_dims(snake_case , axis=1 ).numpy() ) )
@require_flax
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , np.asarray(expand_dims(snake_case , axis=1 ) ) ) )
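# A minimal sketch of the framework-agnostic helpers exercised above (both are
# imported from transformers.utils at the top of this file):
if __name__ == "__main__":
    print(flatten_dict({'a': {'b': 1, 'c': {'d': 2}}}))  # {'a.b': 1, 'a.c.d': 2}
    x = np.random.randn(3, 4)
    assert np.allclose(transpose(x), x.T)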
| 24 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = tempfile.mkdtemp()
# fmt: off
_UpperCAmelCase = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest']
# fmt: on
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
_UpperCAmelCase = {
'do_resize': True,
'size': {'height': 18, 'width': 18},
'do_normalize': True,
'image_mean': [0.5, 0.5, 0.5],
'image_std': [0.5, 0.5, 0.5],
}
_UpperCAmelCase = os.path.join(self.tmpdirname , snake_case )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(snake_case , snake_case )
def lowerCamelCase_ ( self , **snake_case ) -> Union[str, Any]:
return BertTokenizer.from_pretrained(self.tmpdirname , **snake_case )
def lowerCamelCase_ ( self , **snake_case ) -> int:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **snake_case )
def lowerCamelCase_ ( self ) -> List[str]:
shutil.rmtree(self.tmpdirname )
def lowerCamelCase_ ( self ) -> Optional[Any]:
_UpperCAmelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_UpperCAmelCase = [Image.fromarray(np.moveaxis(snake_case , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = VisionTextDualEncoderProcessor(tokenizer=snake_case , image_processor=snake_case )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case )
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
_UpperCAmelCase = self.get_image_processor(do_normalize=snake_case , padding_value=1.0 )
_UpperCAmelCase = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=snake_case , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case )
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = VisionTextDualEncoderProcessor(tokenizer=snake_case , image_processor=snake_case )
_UpperCAmelCase = self.prepare_image_inputs()
_UpperCAmelCase = image_processor(snake_case , return_tensors='np' )
_UpperCAmelCase = processor(images=snake_case , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = VisionTextDualEncoderProcessor(tokenizer=snake_case , image_processor=snake_case )
_UpperCAmelCase = 'lower newer'
_UpperCAmelCase = processor(text=snake_case )
_UpperCAmelCase = tokenizer(snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCamelCase_ ( self ) -> Tuple:
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = VisionTextDualEncoderProcessor(tokenizer=snake_case , image_processor=snake_case )
_UpperCAmelCase = 'lower newer'
_UpperCAmelCase = self.prepare_image_inputs()
_UpperCAmelCase = processor(text=snake_case , images=snake_case )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with self.assertRaises(snake_case ):
processor()
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = VisionTextDualEncoderProcessor(tokenizer=snake_case , image_processor=snake_case )
_UpperCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_UpperCAmelCase = processor.batch_decode(snake_case )
_UpperCAmelCase = tokenizer.batch_decode(snake_case )
self.assertListEqual(snake_case , snake_case )
def lowerCamelCase_ ( self ) -> List[Any]:
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = VisionTextDualEncoderProcessor(tokenizer=snake_case , image_processor=snake_case )
_UpperCAmelCase = 'lower newer'
_UpperCAmelCase = self.prepare_image_inputs()
_UpperCAmelCase = processor(text=snake_case , images=snake_case )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
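# Usage sketch of the processor under test, grounded in the assertions above
# (keys returned: input_ids, token_type_ids, attention_mask, pixel_values):
#   processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
#   inputs = processor(text='lower newer', images=image, return_tensors='np')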
| 719 |
"""simple docstring"""
import os
def UpperCAmelCase ( ):
'''simple docstring'''
    file_path = os.path.join(os.path.dirname(__file__ ) , 'num.txt' )
    with open(file_path ) as file_hand:
        return str(sum(int(line ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
| 24 | 0 |
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
lowercase = logging.getLogger(__name__)
def UpperCAmelCase ( ):
'''simple docstring'''
_UpperCAmelCase = argparse.ArgumentParser(
description='Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).' )
parser.add_argument('--file_path' , type=A , default='data/dump.txt' , help='The path to the data.' )
parser.add_argument('--tokenizer_type' , type=A , default='bert' , choices=['bert', 'roberta', 'gpt2'] )
parser.add_argument('--tokenizer_name' , type=A , default='bert-base-uncased' , help='The tokenizer to use.' )
parser.add_argument('--dump_file' , type=A , default='data/dump' , help='The dump file prefix.' )
_UpperCAmelCase = parser.parse_args()
logger.info(f'Loading Tokenizer ({args.tokenizer_name})' )
if args.tokenizer_type == "bert":
_UpperCAmelCase = BertTokenizer.from_pretrained(args.tokenizer_name )
_UpperCAmelCase = tokenizer.special_tokens_map['cls_token'] # `[CLS]`
_UpperCAmelCase = tokenizer.special_tokens_map['sep_token'] # `[SEP]`
elif args.tokenizer_type == "roberta":
_UpperCAmelCase = RobertaTokenizer.from_pretrained(args.tokenizer_name )
_UpperCAmelCase = tokenizer.special_tokens_map['cls_token'] # `<s>`
_UpperCAmelCase = tokenizer.special_tokens_map['sep_token'] # `</s>`
elif args.tokenizer_type == "gpt2":
_UpperCAmelCase = GPTaTokenizer.from_pretrained(args.tokenizer_name )
_UpperCAmelCase = tokenizer.special_tokens_map['bos_token'] # `<|endoftext|>`
_UpperCAmelCase = tokenizer.special_tokens_map['eos_token'] # `<|endoftext|>`
logger.info(f'Loading text from {args.file_path}' )
with open(args.file_path , 'r' , encoding='utf8' ) as fp:
_UpperCAmelCase = fp.readlines()
logger.info('Start encoding' )
logger.info(f'{len(A )} examples to process.' )
_UpperCAmelCase = []
_UpperCAmelCase = 0
_UpperCAmelCase = 1_0000
_UpperCAmelCase = time.time()
for text in data:
_UpperCAmelCase = f'{bos} {text.strip()} {sep}'
_UpperCAmelCase = tokenizer.encode(A , add_special_tokens=A )
rslt.append(A )
iter += 1
if iter % interval == 0:
_UpperCAmelCase = time.time()
logger.info(f'{iter} examples processed. - {(end-start):.2f}s/{interval}expl' )
_UpperCAmelCase = time.time()
logger.info('Finished binarization' )
logger.info(f'{len(A )} examples processed.' )
_UpperCAmelCase = f'{args.dump_file}.{args.tokenizer_name}.pickle'
_UpperCAmelCase = tokenizer.vocab_size
if vocab_size < (1 << 16):
_UpperCAmelCase = [np.uintaa(A ) for d in rslt]
else:
_UpperCAmelCase = [np.intaa(A ) for d in rslt]
random.shuffle(rslt_ )
logger.info(f'Dump to {dp_file}' )
with open(A , 'wb' ) as handle:
pickle.dump(rslt_ , A , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
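# Example invocation (the script name is assumed; flags mirror the argparse
# defaults above, with one raw-text example per line in the input file):
#   python binarized_data.py --file_path data/dump.txt --tokenizer_type bert \
#       --tokenizer_name bert-base-uncased --dump_file data/dump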
| 720 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase = {
'''configuration_roberta''': ['''ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RobertaConfig''', '''RobertaOnnxConfig'''],
'''tokenization_roberta''': ['''RobertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''RobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaForCausalLM''',
'''RobertaForMaskedLM''',
'''RobertaForMultipleChoice''',
'''RobertaForQuestionAnswering''',
'''RobertaForSequenceClassification''',
'''RobertaForTokenClassification''',
'''RobertaModel''',
'''RobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaForCausalLM''',
'''TFRobertaForMaskedLM''',
'''TFRobertaForMultipleChoice''',
'''TFRobertaForQuestionAnswering''',
'''TFRobertaForSequenceClassification''',
'''TFRobertaForTokenClassification''',
'''TFRobertaMainLayer''',
'''TFRobertaModel''',
'''TFRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''FlaxRobertaForCausalLM''',
'''FlaxRobertaForMaskedLM''',
'''FlaxRobertaForMultipleChoice''',
'''FlaxRobertaForQuestionAnswering''',
'''FlaxRobertaForSequenceClassification''',
'''FlaxRobertaForTokenClassification''',
'''FlaxRobertaModel''',
'''FlaxRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
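# Note on the lazy pattern above: outside of type checking, `_LazyModule` defers
# every heavy import, so `from transformers.models.roberta import RobertaModel`
# only pulls in the torch-backed module the first time the attribute is accessed.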
| 24 | 0 |
"""simple docstring"""
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
lowercase = '''src/diffusers'''
lowercase = '''.'''
# This is to make sure the diffusers module imported is the one in the repo.
lowercase = importlib.util.spec_from_file_location(
'''diffusers''',
os.path.join(DIFFUSERS_PATH, '''__init__.py'''),
submodule_search_locations=[DIFFUSERS_PATH],
)
lowercase = spec.loader.load_module()
def UpperCAmelCase ( A : Any , A : Tuple ):
'''simple docstring'''
return line.startswith(A ) or len(A ) <= 1 or re.search(r'^\s*\)(\s*->.*:|:)\s*$' , A ) is not None
def UpperCAmelCase ( A : Dict ):
'''simple docstring'''
_UpperCAmelCase = object_name.split('.' )
_UpperCAmelCase = 0
# First let's find the module where our object lives.
_UpperCAmelCase = parts[i]
while i < len(A ) and not os.path.isfile(os.path.join(A , f'{module}.py' ) ):
i += 1
if i < len(A ):
_UpperCAmelCase = os.path.join(A , parts[i] )
if i >= len(A ):
raise ValueError(f'`object_name` should begin with the name of a module of diffusers but got {object_name}.' )
with open(os.path.join(A , f'{module}.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
_UpperCAmelCase = f.readlines()
# Now let's find the class / func in the code!
_UpperCAmelCase = ''
_UpperCAmelCase = 0
for name in parts[i + 1 :]:
while (
line_index < len(A ) and re.search(rf'^{indent}(class|def)\s+{name}(\(|\:)' , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(A ):
raise ValueError(f' {object_name} does not match any function or class in {module}.' )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
_UpperCAmelCase = line_index
while line_index < len(A ) and _should_continue(lines[line_index] , A ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_UpperCAmelCase = lines[start_index:line_index]
return "".join(A )
lowercase = re.compile(r'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''')
lowercase = re.compile(r'''^\s*(\S+)->(\S+)(\s+.*|$)''')
lowercase = re.compile(r'''<FILL\s+[^>]*>''')
def UpperCAmelCase ( A : Dict ):
'''simple docstring'''
_UpperCAmelCase = code.split('\n' )
_UpperCAmelCase = 0
while idx < len(A ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(A ):
return re.search(r'^(\s*)\S' , lines[idx] ).groups()[0]
return ""
def UpperCAmelCase ( A : Any ):
'''simple docstring'''
_UpperCAmelCase = len(get_indent(A ) ) > 0
if has_indent:
_UpperCAmelCase = f'class Bla:\n{code}'
_UpperCAmelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=A )
_UpperCAmelCase = black.format_str(A , mode=A )
_UpperCAmelCase , _UpperCAmelCase = style_docstrings_in_code(A )
return result[len('class Bla:\n' ) :] if has_indent else result
def UpperCAmelCase ( A : int , A : int=False ):
'''simple docstring'''
with open(A , 'r' , encoding='utf-8' , newline='\n' ) as f:
_UpperCAmelCase = f.readlines()
_UpperCAmelCase = []
_UpperCAmelCase = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(A ):
_UpperCAmelCase = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = search.groups()
_UpperCAmelCase = find_code_in_diffusers(A )
_UpperCAmelCase = get_indent(A )
_UpperCAmelCase = line_index + 1 if indent == theoretical_indent else line_index + 2
_UpperCAmelCase = theoretical_indent
_UpperCAmelCase = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
_UpperCAmelCase = True
while line_index < len(A ) and should_continue:
line_index += 1
if line_index >= len(A ):
break
_UpperCAmelCase = lines[line_index]
_UpperCAmelCase = _should_continue(A , A ) and re.search(f'^{indent}# End copy' , A ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_UpperCAmelCase = lines[start_index:line_index]
_UpperCAmelCase = ''.join(A )
# Remove any nested `Copied from` comments to avoid circular copies
_UpperCAmelCase = [line for line in theoretical_code.split('\n' ) if _re_copy_warning.search(A ) is None]
_UpperCAmelCase = '\n'.join(A )
# Before comparing, use the `replace_pattern` on the original code.
if len(A ) > 0:
_UpperCAmelCase = replace_pattern.replace('with' , '' ).split(',' )
_UpperCAmelCase = [_re_replace_pattern.search(A ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = pattern.groups()
_UpperCAmelCase = re.sub(A , A , A )
if option.strip() == "all-casing":
_UpperCAmelCase = re.sub(obja.lower() , obja.lower() , A )
_UpperCAmelCase = re.sub(obja.upper() , obja.upper() , A )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
_UpperCAmelCase = blackify(lines[start_index - 1] + theoretical_code )
_UpperCAmelCase = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
_UpperCAmelCase = lines[:start_index] + [theoretical_code] + lines[line_index:]
_UpperCAmelCase = start_index + 1
if overwrite and len(A ) > 0:
# Warn the user a file has been modified.
print(f'Detected changes, rewriting {filename}.' )
with open(A , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(A )
return diffs
def UpperCAmelCase ( A : bool = False ):
'''simple docstring'''
_UpperCAmelCase = glob.glob(os.path.join(A , '**/*.py' ) , recursive=A )
_UpperCAmelCase = []
for filename in all_files:
_UpperCAmelCase = is_copy_consistent(A , A )
diffs += [f'- {filename}: copy does not match {d[0]} at line {d[1]}' for d in new_diffs]
if not overwrite and len(A ) > 0:
_UpperCAmelCase = '\n'.join(A )
raise Exception(
'Found the following copy inconsistencies:\n'
+ diff
+ '\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.' )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
lowercase = parser.parse_args()
check_copies(args.fix_and_overwrite)
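# Illustrative marker this script enforces (the exact classes are placeholders;
# the format follows the `_re_copy_warning` and `_re_replace_pattern` regexes above):
#   # Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->MyBlock
# `python utils/check_copies.py --fix_and_overwrite` then rewrites any drifted
# copy from its original, applying the `A->B` renames (or `all-casing`).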
| 721 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
lowercase = logging.get_logger(__name__)
class lowercase__ ( A ):
'''simple docstring'''
def __init__( self , *snake_case , **snake_case ) -> None:
warnings.warn(
'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use YolosImageProcessor instead.' , snake_case , )
super().__init__(*snake_case , **snake_case )
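# Usage note (a sketch; the warning category is whatever is passed above):
#   feature_extractor = YolosFeatureExtractor()  # warns once, then behaves as YolosImageProcessor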
| 24 | 0 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def UpperCAmelCase ( A : Callable , A : float , A : float , A : float , A : float ):
'''simple docstring'''
_UpperCAmelCase = int(np.ceil((x_end - xa) / step_size ) )
_UpperCAmelCase = np.zeros((n + 1,) )
_UpperCAmelCase = ya
_UpperCAmelCase = xa
for k in range(A ):
_UpperCAmelCase = y[k] + step_size * ode_func(A , y[k] )
_UpperCAmelCase = y[k] + (
(step_size / 2) * (ode_func(A , y[k] ) + ode_func(x + step_size , A ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
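# Usage sketch, assuming the de-obfuscated signature
# euler_modified(ode_func, y0, x0, step_size, x_end). Heun's method is
# predictor-corrector: predict y* = y_k + h*f(x_k, y_k), then correct with the
# averaged slope y_{k+1} = y_k + (h/2) * (f(x_k, y_k) + f(x_k + h, y*)).
#   y = euler_modified(lambda x, y: y, 1.0, 0.0, 0.25, 1.0)  # integrate y' = y
#   print(y[-1])  # ~2.69 at h=0.25, approaching e ~ 2.718 as h shrinks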
| 700 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
'''microsoft/beit-base-patch16-224-pt22k''': (
'''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'''
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class lowercase__ ( A ):
'''simple docstring'''
_UpperCAmelCase = '''beit'''
def __init__( self , snake_case=8192 , snake_case=768 , snake_case=12 , snake_case=12 , snake_case=3072 , snake_case="gelu" , snake_case=0.0 , snake_case=0.0 , snake_case=0.02 , snake_case=1E-12 , snake_case=224 , snake_case=16 , snake_case=3 , snake_case=False , snake_case=False , snake_case=False , snake_case=False , snake_case=0.1 , snake_case=0.1 , snake_case=True , snake_case=[3, 5, 7, 11] , snake_case=[1, 2, 3, 6] , snake_case=True , snake_case=0.4 , snake_case=256 , snake_case=1 , snake_case=False , snake_case=255 , **snake_case , ) -> str:
super().__init__(**snake_case )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = use_mask_token
_UpperCAmelCase = use_absolute_position_embeddings
_UpperCAmelCase = use_relative_position_bias
_UpperCAmelCase = use_shared_relative_position_bias
_UpperCAmelCase = layer_scale_init_value
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = use_mean_pooling
# decode head attributes (semantic segmentation)
_UpperCAmelCase = out_indices
_UpperCAmelCase = pool_scales
# auxiliary head attributes (semantic segmentation)
_UpperCAmelCase = use_auxiliary_head
_UpperCAmelCase = auxiliary_loss_weight
_UpperCAmelCase = auxiliary_channels
_UpperCAmelCase = auxiliary_num_convs
_UpperCAmelCase = auxiliary_concat_input
_UpperCAmelCase = semantic_loss_ignore_index
class lowercase__ ( A ):
'''simple docstring'''
_UpperCAmelCase = version.parse('''1.11''' )
@property
def lowerCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def lowerCamelCase_ ( self ) -> float:
return 1E-4
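# Usage sketch behind the obfuscated class names above (public names assumed to
# be BeitConfig / BeitOnnxConfig, per the checkpoint map at the top):
#   config = BeitConfig(image_size=384, use_relative_position_bias=True)
#   onnx_config = BeitOnnxConfig(config)
#   list(onnx_config.inputs)  # ['pixel_values']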
| 24 | 0 |
"""simple docstring"""
lowercase = range(2, 20 + 1)
lowercase = [10**k for k in range(ks[-1] + 1)]
lowercase = {}
def UpperCAmelCase ( A : int , A : Tuple , A : List[str] , A : Tuple ):
'''simple docstring'''
_UpperCAmelCase = sum(a_i[j] for j in range(A , len(A ) ) )
_UpperCAmelCase = sum(a_i[j] * base[j] for j in range(min(len(A ) , A ) ) )
_UpperCAmelCase , _UpperCAmelCase = 0, 0
_UpperCAmelCase = n - i
_UpperCAmelCase = memo.get(A )
if sub_memo is not None:
_UpperCAmelCase = sub_memo.get(A )
if jumps is not None and len(A ) > 0:
# find and make the largest jump without going over
_UpperCAmelCase = -1
for _k in range(len(A ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
_UpperCAmelCase = _k
break
if max_jump >= 0:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = jumps[max_jump]
# since the difference between jumps is cached, add c
_UpperCAmelCase = diff + c
for j in range(min(A , len(A ) ) ):
_UpperCAmelCase , _UpperCAmelCase = divmod(A , 10 )
if new_c > 0:
add(A , A , A )
else:
_UpperCAmelCase = []
else:
_UpperCAmelCase = {c: []}
_UpperCAmelCase = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
_UpperCAmelCase , _UpperCAmelCase = next_term(A , k - 1 , i + dn , A )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
_UpperCAmelCase , _UpperCAmelCase = compute(A , A , i + dn , A )
diff += _diff
dn += terms_jumped
_UpperCAmelCase = sub_memo[c]
# keep jumps sorted by # of terms skipped
_UpperCAmelCase = 0
while j < len(A ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(A , (diff, dn, k) )
return (diff, dn)
def UpperCAmelCase ( A : Optional[int] , A : Any , A : Tuple , A : Optional[int] ):
'''simple docstring'''
if i >= n:
return 0, i
if k > len(A ):
a_i.extend([0 for _ in range(k - len(A ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
_UpperCAmelCase = i
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 0, 0, 0
for j in range(len(A ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
_UpperCAmelCase = ds_c + ds_b
diff += addend
_UpperCAmelCase = 0
for j in range(A ):
_UpperCAmelCase = a_i[j] + addend
_UpperCAmelCase , _UpperCAmelCase = divmod(A , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(A , A , A )
return diff, i - start_i
def UpperCAmelCase ( A : Tuple , A : Dict , A : List[Any] ):
'''simple docstring'''
for j in range(A , len(A ) ):
_UpperCAmelCase = digits[j] + addend
if s >= 10:
_UpperCAmelCase , _UpperCAmelCase = divmod(A , 10 )
_UpperCAmelCase = addend // 10 + quotient
else:
_UpperCAmelCase = s
_UpperCAmelCase = addend // 10
if addend == 0:
break
while addend > 0:
_UpperCAmelCase , _UpperCAmelCase = divmod(A , 10 )
digits.append(A )
def UpperCAmelCase ( A : int = 10**15 ):
'''simple docstring'''
_UpperCAmelCase = [1]
_UpperCAmelCase = 1
_UpperCAmelCase = 0
while True:
_UpperCAmelCase , _UpperCAmelCase = next_term(A , 20 , i + dn , A )
dn += terms_jumped
if dn == n - i:
break
_UpperCAmelCase = 0
for j in range(len(A ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(F'''{solution() = }''')
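# Brute-force reference for small n, under one plausible reading of the memoized
# solver above (a(1) = 1, a(n + 1) = a(n) + digit_sum(a(n)); indexing may be off
# by one relative to the original problem statement):
def naive_a_n(n: int) -> int:
    a = 1
    for _ in range(n - 1):
        # each step adds the current digit sum, exactly what compute() does digit-wise
        a += sum(int(d) for d in str(a))
    return a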
| 701 |
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
lowercase = logging.getLogger(__name__)
if __name__ == "__main__":
lowercase = argparse.ArgumentParser(
description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
)
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
parser.add_argument('''--vocab_size''', default=3_05_22, type=int)
lowercase = parser.parse_args()
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file, '''rb''') as fp:
lowercase = pickle.load(fp)
logger.info('''Counting occurrences for MLM.''')
lowercase = Counter()
for tk_ids in data:
counter.update(tk_ids)
lowercase = [0] * args.vocab_size
for k, v in counter.items():
lowercase = v
logger.info(F'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, '''wb''') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
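# Example invocation (script name assumed; `--data_file` should point at the
# pickle produced by the binarization step):
#   python token_counts.py --data_file data/dump.bert-base-uncased.pickle \
#       --token_counts_dump data/token_counts.bert-base-uncased.pickle --vocab_size 30522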
| 24 | 0 |
"""simple docstring"""
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
lowercase = logging.get_logger(__name__)
lowercase = TypeVar('''DatasetType''', Dataset, IterableDataset)
def UpperCAmelCase ( A : List[DatasetType] , A : Optional[List[float]] = None , A : Optional[int] = None , A : Optional[DatasetInfo] = None , A : Optional[NamedSplit] = None , A : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ):
'''simple docstring'''
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('Unable to interleave an empty list of datasets.' )
for i, dataset in enumerate(A ):
if not isinstance(A , (Dataset, IterableDataset) ):
if isinstance(A , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
'is an empty dataset dictionary.' )
raise ValueError(
f'Dataset at position {i} has at least one split: {list(A )}\n'
f'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(A ) )}\']' )
raise ValueError(
f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.' )
if i == 0:
_UpperCAmelCase , _UpperCAmelCase = (
(Dataset, IterableDataset) if isinstance(A , A ) else (IterableDataset, Dataset)
)
elif not isinstance(A , A ):
raise ValueError(
f'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(f'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
A , A , A , info=A , split=A , stopping_strategy=A )
else:
return _interleave_iterable_datasets(
A , A , A , info=A , split=A , stopping_strategy=A )
def UpperCAmelCase ( A : List[DatasetType] , A : Optional[DatasetInfo] = None , A : Optional[NamedSplit] = None , A : int = 0 , ):
'''simple docstring'''
if not dsets:
raise ValueError('Unable to concatenate an empty list of datasets.' )
for i, dataset in enumerate(A ):
if not isinstance(A , (Dataset, IterableDataset) ):
if isinstance(A , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
'is an empty dataset dictionary.' )
raise ValueError(
f'Dataset at position {i} has at least one split: {list(A )}\n'
f'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(A ) )}\']' )
raise ValueError(
f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.' )
if i == 0:
_UpperCAmelCase , _UpperCAmelCase = (
(Dataset, IterableDataset) if isinstance(A , A ) else (IterableDataset, Dataset)
)
elif not isinstance(A , A ):
raise ValueError(
f'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(A , info=A , split=A , axis=A )
else:
return _concatenate_iterable_datasets(A , info=A , split=A , axis=A )
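# Usage sketch of the public wrappers defined above (dataset names are only
# illustrative):
#   from datasets import interleave_datasets, load_dataset
#   d1 = load_dataset('c4', 'en', split='train', streaming=True)
#   d2 = load_dataset('oscar', 'unshuffled_deduplicated_en', split='train', streaming=True)
#   mixed = interleave_datasets([d1, d2], probabilities=[0.8, 0.2], seed=42,
#                               stopping_strategy='all_exhausted')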
| 702 |
"""simple docstring"""
from itertools import permutations
def UpperCAmelCase ( A : tuple ):
'''simple docstring'''
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
_UpperCAmelCase = [7, 11, 13, 17]
for i, test in enumerate(A ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def UpperCAmelCase ( A : int = 10 ):
'''simple docstring'''
return sum(
int(''.join(map(A , A ) ) )
for num in permutations(range(A ) )
if is_substring_divisible(A ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
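# Worked check: the classic example 1406357289 is 0-9 pandigital and passes the
# divisibility test above, since d4=6 is even, d3+d4+d5=9 is divisible by 3,
# d6=5 by 5, and 357/7, 572/11, 728/13, 289/17 are all exact.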
| 24 | 0 |
"""simple docstring"""
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
lowercase = (
'''https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'''
)
lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name
def UpperCAmelCase ( ):
'''simple docstring'''
_UpperCAmelCase = 'https://pypi.org/pypi/diffusers/json'
_UpperCAmelCase = json.loads(request.urlopen(A ).read() )['releases'].keys()
return sorted(A , key=lambda A : version.Version(A ) )
def UpperCAmelCase ( ):
'''simple docstring'''
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(A )
os.makedirs(A , exist_ok=A )
_UpperCAmelCase = Path(A ) / '__init__.py'
if not init_path.exists():
init_path.touch()
def UpperCAmelCase ( A : Union[str, os.PathLike] ):
'''simple docstring'''
init_hf_modules()
_UpperCAmelCase = Path(A ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(A , exist_ok=A )
_UpperCAmelCase = dynamic_module_path / '__init__.py'
if not init_path.exists():
init_path.touch()
def UpperCAmelCase ( A : List[Any] ):
'''simple docstring'''
with open(A , 'r' , encoding='utf-8' ) as f:
_UpperCAmelCase = f.read()
# Imports of the form `import .xxx`
    _UpperCAmelCase = re.findall(r'^\s*import\s+\.(\S+)\s*$' , A , flags=re.MULTILINE )
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r'^\s*from\s+\.(\S+)\s+import' , A , flags=re.MULTILINE )
# Unique-ify
return list(set(A ) )
def UpperCAmelCase ( A : List[str] ):
'''simple docstring'''
_UpperCAmelCase = False
_UpperCAmelCase = [module_file]
_UpperCAmelCase = []
# Let's recurse through all relative imports
while not no_change:
_UpperCAmelCase = []
for f in files_to_check:
new_imports.extend(get_relative_imports(A ) )
_UpperCAmelCase = Path(A ).parent
_UpperCAmelCase = [str(module_path / m ) for m in new_imports]
_UpperCAmelCase = [f for f in new_import_files if f not in all_relative_imports]
_UpperCAmelCase = [f'{f}.py' for f in new_import_files]
_UpperCAmelCase = len(A ) == 0
all_relative_imports.extend(A )
return all_relative_imports
def UpperCAmelCase ( A : str ):
'''simple docstring'''
with open(A , 'r' , encoding='utf-8' ) as f:
_UpperCAmelCase = f.read()
# Imports of the form `import xxx`
    _UpperCAmelCase = re.findall(r'^\s*import\s+(\S+)\s*$' , A , flags=re.MULTILINE )
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r'^\s*from\s+(\S+)\s+import' , A , flags=re.MULTILINE )
# Only keep the top-level module
_UpperCAmelCase = [imp.split('.' )[0] for imp in imports if not imp.startswith('.' )]
# Unique-ify and test we got them all
_UpperCAmelCase = list(set(A ) )
_UpperCAmelCase = []
for imp in imports:
try:
importlib.import_module(A )
except ImportError:
missing_packages.append(A )
if len(A ) > 0:
raise ImportError(
'This modeling file requires the following packages that were not found in your environment: '
f'{", ".join(A )}. Run `pip install {" ".join(A )}`' )
return get_relative_imports(A )
def UpperCAmelCase ( A : Dict , A : Tuple ):
'''simple docstring'''
_UpperCAmelCase = module_path.replace(os.path.sep , '.' )
_UpperCAmelCase = importlib.import_module(A )
if class_name is None:
return find_pipeline_class(A )
return getattr(A , A )
def UpperCAmelCase ( A : Tuple ):
'''simple docstring'''
from ..pipelines import DiffusionPipeline
_UpperCAmelCase = dict(inspect.getmembers(A , inspect.isclass ) )
_UpperCAmelCase = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , A )
and cls.__module__.split('.' )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
f'Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'
f' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'
f' {loaded_module}.' )
_UpperCAmelCase = cls
return pipeline_class
def UpperCAmelCase ( A : Union[str, os.PathLike] , A : str , A : Optional[Union[str, os.PathLike]] = None , A : bool = False , A : bool = False , A : Optional[Dict[str, str]] = None , A : Optional[Union[bool, str]] = None , A : Optional[str] = None , A : bool = False , ):
'''simple docstring'''
_UpperCAmelCase = str(A )
_UpperCAmelCase = os.path.join(A , A )
if os.path.isfile(A ):
_UpperCAmelCase = module_file_or_url
_UpperCAmelCase = 'local'
elif pretrained_model_name_or_path.count('/' ) == 0:
_UpperCAmelCase = get_diffusers_versions()
# cut ".dev0"
_UpperCAmelCase = 'v' + '.'.join(__version__.split('.' )[:3] )
# retrieve github version that matches
if revision is None:
_UpperCAmelCase = latest_version if latest_version[1:] in available_versions else 'main'
logger.info(f'Defaulting to latest_version: {revision}.' )
elif revision in available_versions:
_UpperCAmelCase = f'v{revision}'
elif revision == "main":
_UpperCAmelCase = revision
else:
raise ValueError(
f'`custom_revision`: {revision} does not exist. Please make sure to choose one of'
f' {", ".join(available_versions + ["main"] )}.' )
# community pipeline on GitHub
_UpperCAmelCase = COMMUNITY_PIPELINES_URL.format(revision=A , pipeline=A )
try:
_UpperCAmelCase = cached_download(
A , cache_dir=A , force_download=A , proxies=A , resume_download=A , local_files_only=A , use_auth_token=A , )
_UpperCAmelCase = 'git'
_UpperCAmelCase = pretrained_model_name_or_path + '.py'
except EnvironmentError:
logger.error(f'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
raise
else:
try:
# Load from URL or cache if already cached
_UpperCAmelCase = hf_hub_download(
A , A , cache_dir=A , force_download=A , proxies=A , resume_download=A , local_files_only=A , use_auth_token=A , )
_UpperCAmelCase = os.path.join('local' , '--'.join(pretrained_model_name_or_path.split('/' ) ) )
except EnvironmentError:
logger.error(f'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
raise
# Check we have all the requirements in our environment
_UpperCAmelCase = check_imports(A )
# Now we move the module inside our cached dynamic modules.
_UpperCAmelCase = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(A )
_UpperCAmelCase = Path(A ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(A , submodule_path / module_file )
for module_needed in modules_needed:
_UpperCAmelCase = f'{module_needed}.py'
shutil.copy(os.path.join(A , A ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(A , A ):
_UpperCAmelCase = use_auth_token
elif use_auth_token is True:
_UpperCAmelCase = HfFolder.get_token()
else:
_UpperCAmelCase = None
_UpperCAmelCase = model_info(A , revision=A , token=A ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
_UpperCAmelCase = submodule_path / commit_hash
_UpperCAmelCase = full_submodule + os.path.sep + commit_hash
create_dynamic_module(A )
if not (submodule_path / module_file).exists():
shutil.copy(A , submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
A , f'{module_needed}.py' , cache_dir=A , force_download=A , resume_download=A , proxies=A , use_auth_token=A , revision=A , local_files_only=A , )
return os.path.join(A , A )
def UpperCAmelCase ( A : Union[str, os.PathLike] , A : str , A : Optional[str] = None , A : Optional[Union[str, os.PathLike]] = None , A : bool = False , A : bool = False , A : Optional[Dict[str, str]] = None , A : Optional[Union[bool, str]] = None , A : Optional[str] = None , A : bool = False , **A : Dict , ):
'''simple docstring'''
_UpperCAmelCase = get_cached_module_file(
A , A , cache_dir=A , force_download=A , resume_download=A , proxies=A , use_auth_token=A , revision=A , local_files_only=A , )
return get_class_in_module(A , final_module.replace('.py' , '' ) )
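# Usage sketch (entry point is the function just above, named
# `get_class_from_dynamic_module` in the real source; the repo id is illustrative):
#   cls = get_class_from_dynamic_module('my-org/my-community-pipeline', 'pipeline.py')
#   # downloads pipeline.py plus its relative imports, caches them under
#   # HF_MODULES_CACHE, and returns the single DiffusionPipeline subclass defined there.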
| 703 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase = {
'''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
'''tokenization_mvp''': ['''MvpTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''MvpTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 24 | 0 |
"""simple docstring"""
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def UpperCAmelCase ( A : Optional[int] ) -> str:
'''simple docstring'''
    return 1.0 / (1.0 + np.exp(-A ))
def UpperCAmelCase ( A : Union[str, Any] ) -> Dict:
'''simple docstring'''
    maxes = np.max(A , axis=-1 , keepdims=True )
    shifted_exp = np.exp(A - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True )
class lowercase__ ( A ):
'''simple docstring'''
_UpperCAmelCase = '''sigmoid'''
_UpperCAmelCase = '''softmax'''
_UpperCAmelCase = '''none'''
@add_end_docstrings(
A, R'''
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `"default"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `"sigmoid"`: Applies the sigmoid function on the output.
- `"softmax"`: Applies the softmax function on the output.
- `"none"`: Does not apply any function on the output.
''', )
class lowercase__ ( A ):
'''simple docstring'''
_UpperCAmelCase = False
_UpperCAmelCase = ClassificationFunction.NONE
def __init__( self , **snake_case ) -> List[str]:
super().__init__(**snake_case )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
def lowerCamelCase_ ( self , snake_case=None , snake_case=None , snake_case="" , **snake_case ) -> Optional[Any]:
# Using "" as default argument because we're going to use `top_k=None` in user code to declare
# "No top_k"
_UpperCAmelCase = tokenizer_kwargs
_UpperCAmelCase = {}
if hasattr(self.model.config , 'return_all_scores' ) and return_all_scores is None:
_UpperCAmelCase = self.model.config.return_all_scores
if isinstance(snake_case , snake_case ) or top_k is None:
_UpperCAmelCase = top_k
_UpperCAmelCase = False
elif return_all_scores is not None:
warnings.warn(
                '`return_all_scores` is now deprecated, if you want similar functionality use `top_k=None` instead of'
' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.' , snake_case , )
if return_all_scores:
_UpperCAmelCase = None
else:
_UpperCAmelCase = 1
if isinstance(snake_case , snake_case ):
_UpperCAmelCase = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
_UpperCAmelCase = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self , *snake_case , **snake_case ) -> Dict:
_UpperCAmelCase = super().__call__(*snake_case , **snake_case )
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
_UpperCAmelCase = 'top_k' not in kwargs
if isinstance(args[0] , snake_case ) and _legacy:
# This pipeline is odd, and return a list when single item is run
return [result]
else:
return result
def lowerCamelCase_ ( self , snake_case , **snake_case ) -> Dict[str, GenericTensor]:
_UpperCAmelCase = self.framework
if isinstance(snake_case , snake_case ):
return self.tokenizer(**snake_case , return_tensors=snake_case , **snake_case )
elif isinstance(snake_case , snake_case ) and len(snake_case ) == 1 and isinstance(inputs[0] , snake_case ) and len(inputs[0] ) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=snake_case , **snake_case )
elif isinstance(snake_case , snake_case ):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
'The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'
' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.' )
return self.tokenizer(snake_case , return_tensors=snake_case , **snake_case )
def lowerCamelCase_ ( self , snake_case ) -> List[Any]:
return self.model(**snake_case )
def lowerCamelCase_ ( self , snake_case , snake_case=None , snake_case=1 , snake_case=True ) -> List[Any]:
# `_legacy` is used to determine if we're running the naked pipeline and in backward
# compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
# the more natural result containing the list.
# Default value before `set_parameters`
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
_UpperCAmelCase = ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
_UpperCAmelCase = ClassificationFunction.SOFTMAX
elif hasattr(self.model.config , 'function_to_apply' ) and function_to_apply is None:
_UpperCAmelCase = self.model.config.function_to_apply
else:
_UpperCAmelCase = ClassificationFunction.NONE
_UpperCAmelCase = model_outputs['logits'][0]
_UpperCAmelCase = outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
_UpperCAmelCase = sigmoid(snake_case )
elif function_to_apply == ClassificationFunction.SOFTMAX:
_UpperCAmelCase = softmax(snake_case )
elif function_to_apply == ClassificationFunction.NONE:
_UpperCAmelCase = outputs
else:
raise ValueError(f'Unrecognized `function_to_apply` argument: {function_to_apply}' )
if top_k == 1 and _legacy:
return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
_UpperCAmelCase = [
{'label': self.model.config.idalabel[i], 'score': score.item()} for i, score in enumerate(snake_case )
]
if not _legacy:
            dict_scores.sort(key=lambda x : x["score"] , reverse=snake_case )
if top_k is not None:
_UpperCAmelCase = dict_scores[:top_k]
return dict_scores
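# Usage sketch (this class backs `pipeline('text-classification')`):
#   from transformers import pipeline
#   classifier = pipeline('text-classification')
#   classifier('This movie was great!')              # [{'label': ..., 'score': ...}]
#   classifier('This movie was great!', top_k=None)  # every label, sorted by score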
| 704 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 24 | 0 |
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class lowercase__ ( nn.Module ):
'''simple docstring'''
def __init__( self ) -> Any:
super().__init__()
_UpperCAmelCase = nn.Linear(3 , 4 )
_UpperCAmelCase = nn.BatchNormad(4 )
_UpperCAmelCase = nn.Linear(4 , 5 )
def lowerCamelCase_ ( self , snake_case ) -> Any:
return self.lineara(self.batchnorm(self.lineara(snake_case ) ) )
class lowercase__ ( A ):
'''simple docstring'''
def lowerCamelCase_ ( self , snake_case , *snake_case , **snake_case ) -> int:
return (args[0] + 1,) + args[1:], kwargs
class lowercase__ ( A ):
'''simple docstring'''
def lowerCamelCase_ ( self , snake_case , snake_case ) -> str:
return output + 1
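# Mechanics under test, in brief: `add_hook_to_module` stashes the original
# forward as `_old_forward` and wraps it roughly as
#   args, kwargs = hook.pre_forward(module, *args, **kwargs)
#   output = module._old_forward(*args, **kwargs)
#   return hook.post_forward(module, output)
# so PreForwardHook shifts inputs by +1 and PostForwardHook shifts outputs by +1.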
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase_ ( self ) -> Any:
_UpperCAmelCase = ModelForTest()
_UpperCAmelCase = ModelHook()
add_hook_to_module(snake_case , snake_case )
self.assertEqual(test_model._hf_hook , snake_case )
self.assertTrue(hasattr(snake_case , '_old_forward' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , 'forward' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] )
remove_hook_from_module(snake_case )
self.assertFalse(hasattr(snake_case , '_hf_hook' ) )
self.assertFalse(hasattr(snake_case , '_old_forward' ) )
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = ModelForTest()
_UpperCAmelCase = ModelHook()
add_hook_to_module(snake_case , snake_case )
add_hook_to_module(snake_case , snake_case , append=snake_case )
self.assertEqual(isinstance(test_model._hf_hook , snake_case ) , snake_case )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(snake_case , '_old_forward' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , 'forward' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] )
remove_hook_from_module(snake_case )
self.assertFalse(hasattr(snake_case , '_hf_hook' ) )
self.assertFalse(hasattr(snake_case , '_old_forward' ) )
def lowerCamelCase_ ( self ) -> Tuple:
_UpperCAmelCase = ModelForTest()
_UpperCAmelCase = torch.randn(2 , 3 )
_UpperCAmelCase = test_model(x + 1 )
_UpperCAmelCase = test_model(x + 2 )
_UpperCAmelCase = PreForwardHook()
add_hook_to_module(snake_case , snake_case )
_UpperCAmelCase = test_model(snake_case )
self.assertTrue(torch.allclose(snake_case , snake_case , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_UpperCAmelCase = PreForwardHook()
add_hook_to_module(snake_case , snake_case )
_UpperCAmelCase = test_model(snake_case )
self.assertTrue(torch.allclose(snake_case , snake_case , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
_UpperCAmelCase = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(snake_case , snake_case )
_UpperCAmelCase = test_model(snake_case )
assert torch.allclose(snake_case , snake_case , atol=1E-5 )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = ModelForTest()
_UpperCAmelCase = torch.randn(2 , 3 )
_UpperCAmelCase = test_model(snake_case )
_UpperCAmelCase = PostForwardHook()
add_hook_to_module(snake_case , snake_case )
_UpperCAmelCase = test_model(snake_case )
self.assertTrue(torch.allclose(snake_case , output + 1 , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_UpperCAmelCase = PostForwardHook()
add_hook_to_module(snake_case , snake_case )
_UpperCAmelCase = test_model(snake_case )
self.assertTrue(torch.allclose(snake_case , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
_UpperCAmelCase = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(snake_case , snake_case )
_UpperCAmelCase = test_model(snake_case )
assert torch.allclose(snake_case , output + 2 , atol=1E-5 )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = ModelForTest()
_UpperCAmelCase = torch.randn(2 , 3 )
_UpperCAmelCase = test_model(snake_case )
_UpperCAmelCase = PostForwardHook()
add_hook_to_module(snake_case , snake_case )
_UpperCAmelCase = test_model(snake_case )
self.assertTrue(torch.allclose(snake_case , output + 1 ) )
self.assertTrue(outputa.requires_grad )
_UpperCAmelCase = True
_UpperCAmelCase = test_model(snake_case )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
_UpperCAmelCase = torch.randn(2 , 3 )
_UpperCAmelCase = model(snake_case )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(snake_case , AlignDevicesHook(io_same_device=snake_case ) )
_UpperCAmelCase = torch.randn(2 , 3 ).to(0 )
_UpperCAmelCase = model(snake_case )
self.assertEqual(output.device , torch.device(0 ) )
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
_UpperCAmelCase = {'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True}
add_hook_to_module(model.lineara , AlignDevicesHook(**snake_case ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**snake_case ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**snake_case ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
_UpperCAmelCase = torch.device(hook_kwargs['execution_device'] )
self.assertEqual(model.batchnorm.running_mean.device , snake_case )
_UpperCAmelCase = torch.randn(2 , 3 )
_UpperCAmelCase = model(snake_case )
self.assertEqual(output.device , snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
_UpperCAmelCase = {
'execution_device': 0 if torch.cuda.is_available() else 'cpu',
'offload': True,
'offload_buffers': True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**snake_case ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**snake_case ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**snake_case ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
_UpperCAmelCase = torch.randn(2 , 3 )
_UpperCAmelCase = model(snake_case )
self.assertEqual(output.device , snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
def lowerCamelCase_ ( self ) -> List[Any]:
_UpperCAmelCase = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
_UpperCAmelCase = 0 if torch.cuda.is_available() else 'cpu'
attach_align_device_hook(snake_case , execution_device=snake_case , offload=snake_case )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
_UpperCAmelCase = torch.device(snake_case )
self.assertEqual(model.batchnorm.running_mean.device , snake_case )
_UpperCAmelCase = torch.randn(2 , 3 )
_UpperCAmelCase = model(snake_case )
self.assertEqual(output.device , snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(snake_case )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
attach_align_device_hook(snake_case , execution_device=snake_case , offload=snake_case , offload_buffers=snake_case )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
_UpperCAmelCase = torch.randn(2 , 3 )
_UpperCAmelCase = model(snake_case )
self.assertEqual(output.device , snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(snake_case )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
_UpperCAmelCase = 0 if torch.cuda.is_available() else 'cpu'
attach_align_device_hook(
snake_case , execution_device=snake_case , offload=snake_case , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
_UpperCAmelCase = torch.device(snake_case )
self.assertEqual(model.batchnorm.running_mean.device , snake_case )
_UpperCAmelCase = torch.randn(2 , 3 )
_UpperCAmelCase = model(snake_case )
self.assertEqual(output.device , snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(snake_case )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
attach_align_device_hook(
snake_case , execution_device=snake_case , offload=snake_case , weights_map=model.state_dict() , offload_buffers=snake_case , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
_UpperCAmelCase = torch.randn(2 , 3 )
_UpperCAmelCase = model(snake_case )
self.assertEqual(output.device , snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(snake_case )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
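if __name__ == "__main__":
    # Minimal end-to-end sketch (added for illustration, not part of the
    # original tests) of the CPU-offload pattern the tests above exercise,
    # assuming `accelerate` and `torch` are installed.
    import torch
    from accelerate.hooks import AlignDevicesHook, add_hook_to_module, remove_hook_from_module

    layer = torch.nn.Linear(3, 4)
    add_hook_to_module(layer, AlignDevicesHook(execution_device='cpu', offload=True))
    print(layer.weight.device)  # meta: the weights now live in an offload map
    print(layer(torch.randn(2, 3)).shape)  # forward still works; weights are streamed in
    remove_hook_from_module(layer)
    print(layer.weight.device)  # cpu: the weights are restored in place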
| 705 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowercase = logging.get_logger(__name__)
lowercase = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class lowercase__ ( A, A ):
'''simple docstring'''
_UpperCAmelCase = '''swin'''
_UpperCAmelCase = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , snake_case=224 , snake_case=4 , snake_case=3 , snake_case=96 , snake_case=[2, 2, 6, 2] , snake_case=[3, 6, 12, 24] , snake_case=7 , snake_case=4.0 , snake_case=True , snake_case=0.0 , snake_case=0.0 , snake_case=0.1 , snake_case="gelu" , snake_case=False , snake_case=0.02 , snake_case=1E-5 , snake_case=32 , snake_case=None , snake_case=None , **snake_case , ) -> List[Any]:
super().__init__(**snake_case )
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = embed_dim
_UpperCAmelCase = depths
_UpperCAmelCase = len(snake_case )
_UpperCAmelCase = num_heads
_UpperCAmelCase = window_size
_UpperCAmelCase = mlp_ratio
_UpperCAmelCase = qkv_bias
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = hidden_act
_UpperCAmelCase = use_absolute_embeddings
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = initializer_range
_UpperCAmelCase = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_UpperCAmelCase = int(embed_dim * 2 ** (len(snake_case ) - 1) )
_UpperCAmelCase = ['stem'] + [f'stage{idx}' for idx in range(1 , len(snake_case ) + 1 )]
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(
out_features=snake_case , out_indices=snake_case , stage_names=self.stage_names )
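        # Worked check (added for illustration): with the defaults above this
        # gives hidden_size = int(96 * 2 ** (4 - 1)) = 768, the channel
        # dimension after the last Swin stage.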
class lowercase__ ( A ):
'''simple docstring'''
_UpperCAmelCase = version.parse('''1.11''' )
@property
def lowerCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def lowerCamelCase_ ( self ) -> float:
return 1E-4
| 24 | 0 |
"""simple docstring"""
def UpperCAmelCase ( A : Tuple ):
'''simple docstring'''
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def UpperCAmelCase ( A : dict[int, list[int]] ):
'''simple docstring'''
_UpperCAmelCase = 0
_UpperCAmelCase = len(A ) # No of vertices in graph
_UpperCAmelCase = [0] * n
_UpperCAmelCase = [False] * n
def dfs(A : Dict , A : List[Any] , A : Any , A : Any ):
_UpperCAmelCase = True
_UpperCAmelCase = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(A , A , A , id_ )
_UpperCAmelCase = min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
_UpperCAmelCase = min(low[at] , low[to] )
_UpperCAmelCase = []
for i in range(A ):
if not visited[i]:
dfs(A , -1 , A , id_ )
return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
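    # Usage sketch (added for illustration, with readable names in place of the
    # style-transformed ones above): the second helper returns the bridge edges
    # of an undirected graph given as an adjacency dict, e.g.
    #   find_bridges({0: [1, 2], 1: [0, 2], 2: [0, 1, 3], 3: [2]})  # -> [(2, 3)]
    # since (2, 3) is the only edge whose removal disconnects the graph.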
| 706 |
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class lowercase__ ( nn.Module ):
'''simple docstring'''
def __init__( self , snake_case = 16 , snake_case = 88 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = 32 , snake_case = None , snake_case = False , snake_case = None , snake_case = None , snake_case = "geglu" , snake_case = None , ) -> str:
super().__init__()
_UpperCAmelCase = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=snake_case , attention_head_dim=snake_case , in_channels=snake_case , num_layers=snake_case , dropout=snake_case , norm_num_groups=snake_case , cross_attention_dim=snake_case , attention_bias=snake_case , sample_size=snake_case , num_vector_embeds=snake_case , activation_fn=snake_case , num_embeds_ada_norm=snake_case , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
_UpperCAmelCase = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
_UpperCAmelCase = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
_UpperCAmelCase = [1, 0]
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case=None , snake_case=None , snake_case=None , snake_case = True , ) -> Any:
_UpperCAmelCase = hidden_states
_UpperCAmelCase = []
_UpperCAmelCase = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
_UpperCAmelCase = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
_UpperCAmelCase = self.transformer_index_for_condition[i]
_UpperCAmelCase = self.transformers[transformer_index](
snake_case , encoder_hidden_states=snake_case , timestep=snake_case , cross_attention_kwargs=snake_case , return_dict=snake_case , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
_UpperCAmelCase = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
_UpperCAmelCase = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=snake_case )
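# Worked check (added for illustration): with mix_ratio = 0.5 the forward pass
# above returns 0.5 * (out_1 - x) + 0.5 * (out_2 - x) + x, i.e. the input plus
# the average of the two transformers' residual updates, where each transformer
# sees only its own slice of the concatenated condition tokens (77 + 257).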
| 24 | 0 |
"""simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
lowercase = '''<<<<<<< This should probably be modified because it mentions: '''
lowercase = '''=======
>>>>>>>
'''
lowercase = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
lowercase = [
# (pattern, replacement)
# Order is important here for some replacements
(r'''tfds\.core''', r'''datasets'''),
(r'''tf\.io\.gfile\.GFile''', r'''open'''),
(r'''tf\.([\w\d]+)''', r'''datasets.Value(\'\1\')'''),
(r'''tfds\.features\.Text\(\)''', r'''datasets.Value(\'string\')'''),
(r'''tfds\.features\.Text\(''', r'''datasets.Value(\'string\'),'''),
(r'''features\s*=\s*tfds.features.FeaturesDict\(''', r'''features=datasets.Features('''),
(r'''tfds\.features\.FeaturesDict\(''', r'''dict('''),
(r'''The TensorFlow Datasets Authors''', r'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(r'''tfds\.''', r'''datasets.'''),
(r'''dl_manager\.manual_dir''', r'''self.config.data_dir'''),
(r'''self\.builder_config''', r'''self.config'''),
]
def UpperCAmelCase ( A : Namespace ):
'''simple docstring'''
return ConvertCommand(args.tfds_path , args.datasets_directory )
class lowercase__ ( A ):
'''simple docstring'''
@staticmethod
def lowerCamelCase_ ( snake_case ) -> str:
_UpperCAmelCase = parser.add_parser(
'convert' , help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.' , )
train_parser.add_argument(
'--tfds_path' , type=snake_case , required=snake_case , help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.' , )
train_parser.add_argument(
'--datasets_directory' , type=snake_case , required=snake_case , help='Path to the HuggingFace Datasets folder.' )
train_parser.set_defaults(func=snake_case )
def __init__( self , snake_case , snake_case , *snake_case ) -> Any:
_UpperCAmelCase = get_logger('datasets-cli/converting' )
_UpperCAmelCase = tfds_path
_UpperCAmelCase = datasets_directory
def lowerCamelCase_ ( self ) -> Optional[Any]:
if os.path.isdir(self._tfds_path ):
_UpperCAmelCase = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
_UpperCAmelCase = os.path.dirname(self._tfds_path )
else:
raise ValueError('--tfds_path is neither a directory nor a file. Please check path.' )
_UpperCAmelCase = os.path.abspath(self._datasets_directory )
self._logger.info(f'Converting datasets from {abs_tfds_path} to {abs_datasets_path}' )
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = {}
if os.path.isdir(self._tfds_path ):
_UpperCAmelCase = os.listdir(snake_case )
else:
_UpperCAmelCase = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f'Looking at file {f_name}' )
_UpperCAmelCase = os.path.join(snake_case , snake_case )
_UpperCAmelCase = os.path.join(snake_case , snake_case )
if not os.path.isfile(snake_case ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('Skipping file' )
continue
with open(snake_case , encoding='utf-8' ) as f:
_UpperCAmelCase = f.readlines()
_UpperCAmelCase = []
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = []
for line in lines:
_UpperCAmelCase = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
_UpperCAmelCase = 'import datasets\n'
elif "import tensorflow" in out_line:
# order is important here
_UpperCAmelCase = ''
continue
elif "from absl import logging" in out_line:
_UpperCAmelCase = 'from datasets import logging\n'
elif "getLogger" in out_line:
_UpperCAmelCase = out_line.replace('getLogger' , 'get_logger' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
_UpperCAmelCase = True
_UpperCAmelCase = list(filter(lambda snake_case : e in out_line , snake_case ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(snake_case ) + '\n' )
out_lines.append(snake_case )
out_lines.append(snake_case )
continue
else:
for pattern, replacement in TO_CONVERT:
_UpperCAmelCase = re.sub(snake_case , snake_case , snake_case )
                # Take care of saving utilities (to move them later together with the main script)
if "tensorflow_datasets" in out_line:
_UpperCAmelCase = re.match(r'from\stensorflow_datasets.*import\s([^\.\r\n]+)' , snake_case )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(',' ) )
_UpperCAmelCase = 'from . import ' + match.group(1 )
                # Check we have not forgotten anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f'Error converting {out_line.strip()}' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
_UpperCAmelCase = True
out_lines.append(snake_case )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
_UpperCAmelCase = f_name.replace('.py' , '' )
_UpperCAmelCase = os.path.join(snake_case , snake_case )
_UpperCAmelCase = os.path.join(snake_case , snake_case )
os.makedirs(snake_case , exist_ok=snake_case )
self._logger.info(f'Adding directory {output_dir}' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(snake_case )
if needs_manual_update:
with_manual_update.append(snake_case )
with open(snake_case , 'w' , encoding='utf-8' ) as f:
f.writelines(snake_case )
self._logger.info(f'Converted in {output_file}' )
for utils_file in utils_files:
try:
_UpperCAmelCase = os.path.basename(snake_case )
_UpperCAmelCase = imports_to_builder_map[f_name.replace('.py' , '' )]
                self._logger.info(f'Moving {utils_file} to {dest_folder}' )
shutil.copy(snake_case , snake_case )
except KeyError:
self._logger.error(f'Cannot find destination folder for {utils_file}. Please copy manually.' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f'You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.' )
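if __name__ == "__main__":
    # Self-contained sketch (added for illustration, not part of the original
    # command) of how patterns like those in TO_CONVERT rewrite a TFDS source
    # line into its `datasets` equivalent.
    import re

    line = "class MyDataset(tfds.core.GeneratorBasedBuilder):"
    for pattern, replacement in [(r"tfds\.core", r"datasets"), (r"tfds\.", r"datasets.")]:
        line = re.sub(pattern, replacement, line)
    print(line)  # class MyDataset(datasets.GeneratorBasedBuilder):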
| 707 |
"""simple docstring"""
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase__ ( A ):
'''simple docstring'''
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(snake_case , 'embed_dim' ) )
self.parent.assertTrue(hasattr(snake_case , 'num_heads' ) )
class lowercase__ :
'''simple docstring'''
def __init__( self , snake_case , snake_case=13 , snake_case=64 , snake_case=3 , snake_case=[16, 48, 96] , snake_case=[1, 3, 6] , snake_case=[1, 2, 10] , snake_case=[7, 3, 3] , snake_case=[4, 2, 2] , snake_case=[2, 1, 1] , snake_case=[2, 2, 2] , snake_case=[False, False, True] , snake_case=[0.0, 0.0, 0.0] , snake_case=0.02 , snake_case=1E-12 , snake_case=True , snake_case=True , snake_case=2 , ) -> Tuple:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_sizes
_UpperCAmelCase = patch_stride
_UpperCAmelCase = patch_padding
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_channels
_UpperCAmelCase = embed_dim
_UpperCAmelCase = num_heads
_UpperCAmelCase = stride_kv
_UpperCAmelCase = depth
_UpperCAmelCase = cls_token
_UpperCAmelCase = attention_drop_rate
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self ) -> List[str]:
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Optional[int]:
_UpperCAmelCase = CvtModel(config=snake_case )
model.to(snake_case )
model.eval()
_UpperCAmelCase = model(snake_case )
_UpperCAmelCase = (self.image_size, self.image_size)
_UpperCAmelCase , _UpperCAmelCase = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
_UpperCAmelCase = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
_UpperCAmelCase = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
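        # Worked check (added for illustration): with image_size=64,
        # patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2] and patch_padding=[2, 1, 1],
        # the spatial size shrinks 64 -> floor((64 + 4 - 7) / 4) + 1 = 16 -> 8 -> 4.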
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Optional[Any]:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = CvtForImageClassification(snake_case )
model.to(snake_case )
model.eval()
_UpperCAmelCase = model(snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowercase__ ( A, A, unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
_UpperCAmelCase = (
{'''feature-extraction''': CvtModel, '''image-classification''': CvtForImageClassification}
if is_torch_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = CvtModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=37 )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase_ ( self ) -> Union[str, Any]:
return
@unittest.skip(reason='Cvt does not output attentions' )
def lowerCamelCase_ ( self ) -> str:
pass
@unittest.skip(reason='Cvt does not use inputs_embeds' )
def lowerCamelCase_ ( self ) -> int:
pass
@unittest.skip(reason='Cvt does not support input and output embeddings' )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
pass
def lowerCamelCase_ ( self ) -> Any:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(snake_case )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case )
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def lowerCamelCase_ ( self ) -> Optional[int]:
def check_hidden_states_output(snake_case , snake_case , snake_case ):
_UpperCAmelCase = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(snake_case , snake_case ) )
_UpperCAmelCase = outputs.hidden_states
_UpperCAmelCase = len(self.model_tester.depth )
self.assertEqual(len(snake_case ) , snake_case )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = True
check_hidden_states_output(snake_case , snake_case , snake_case )
            # check that output_hidden_states also works using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
check_hidden_states_output(snake_case , snake_case , snake_case )
def lowerCamelCase_ ( self ) -> Any:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCamelCase_ ( self ) -> Dict:
pass
@slow
def lowerCamelCase_ ( self ) -> Dict:
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = CvtModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def UpperCAmelCase ( ):
'''simple docstring'''
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCamelCase_ ( self ) -> List[Any]:
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(snake_case )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=snake_case , return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**snake_case )
# verify the logits
_UpperCAmelCase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , snake_case )
_UpperCAmelCase = torch.tensor([0.9285, 0.9015, -0.3150] ).to(snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1E-4 ) )
| 24 | 0 |
"""simple docstring"""
import requests
from bsa import BeautifulSoup
def UpperCAmelCase ( A : str , A : dict ):
'''simple docstring'''
_UpperCAmelCase = BeautifulSoup(requests.get(A , params=A ).content , 'html.parser' )
_UpperCAmelCase = soup.find('div' , attrs={'class': 'gs_ri'} )
_UpperCAmelCase = div.find('div' , attrs={'class': 'gs_fl'} ).find_all('a' )
return anchors[2].get_text()
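# Note (added for illustration): in a Google Scholar result, the third anchor
# of the 'gs_fl' footer is the "Cited by N" link, so the text returned above is
# the citation count as displayed on the page.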
if __name__ == "__main__":
lowercase = {
'''title''': (
'''Precisely geometry controlled microsupercapacitors for ultrahigh areal '''
'''capacitance, volumetric capacitance, and energy density'''
),
'''journal''': '''Chem. Mater.''',
'''volume''': 30,
'''pages''': '''3979-3990''',
'''year''': 20_18,
'''hl''': '''en''',
}
print(get_citation('''https://scholar.google.com/scholar_lookup''', params=params))
| 708 |
"""simple docstring"""
from __future__ import annotations
from cmath import sqrt
def UpperCAmelCase ( A : int , A : int , A : int ):
'''simple docstring'''
if a == 0:
raise ValueError('Coefficient \'a\' must not be zero.' )
_UpperCAmelCase = b * b - 4 * a * c
_UpperCAmelCase = (-b + sqrt(A )) / (2 * a)
_UpperCAmelCase = (-b - sqrt(A )) / (2 * a)
return (
root_a.real if not root_a.imag else root_a,
root_a.real if not root_a.imag else root_a,
)
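# Worked check (added for illustration): for 5x^2 + 6x + 1 = 0 the discriminant
# is 6**2 - 4*5*1 = 16, giving roots (-6 + 4) / 10 = -0.2 and (-6 - 4) / 10 = -1.0,
# matching the demo below.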
def UpperCAmelCase ( ):
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase = quadratic_roots(a=5 , b=6 , c=1 )
print(f'The solutions are: {solutiona} and {solutiona}' )
if __name__ == "__main__":
main()
| 24 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
lowercase = {
'''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
'''processing_trocr''': ['''TrOCRProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrOCRForCausalLM''',
'''TrOCRPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 709 |
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class lowercase__ ( A, unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = BarthezTokenizer
_UpperCAmelCase = BarthezTokenizerFast
_UpperCAmelCase = True
_UpperCAmelCase = True
def lowerCamelCase_ ( self ) -> Optional[int]:
super().setUp()
_UpperCAmelCase = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=snake_case )
_UpperCAmelCase = tokenizer
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = '<pad>'
_UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case ) , snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case ) , snake_case )
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(snake_case ) , 101122 )
def lowerCamelCase_ ( self ) -> List[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 101122 )
@require_torch
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_UpperCAmelCase = [0, 57, 3018, 70307, 91, 2]
_UpperCAmelCase = self.tokenizer(
snake_case , max_length=len(snake_case ) , padding=snake_case , truncation=snake_case , return_tensors='pt' )
self.assertIsInstance(snake_case , snake_case )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
_UpperCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(snake_case , snake_case )
def lowerCamelCase_ ( self ) -> Optional[Any]:
if not self.test_rust_tokenizer:
return
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = 'I was born in 92000, and this is falsรฉ.'
_UpperCAmelCase = tokenizer.tokenize(snake_case )
_UpperCAmelCase = rust_tokenizer.tokenize(snake_case )
self.assertListEqual(snake_case , snake_case )
_UpperCAmelCase = tokenizer.encode(snake_case , add_special_tokens=snake_case )
_UpperCAmelCase = rust_tokenizer.encode(snake_case , add_special_tokens=snake_case )
self.assertListEqual(snake_case , snake_case )
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = tokenizer.encode(snake_case )
_UpperCAmelCase = rust_tokenizer.encode(snake_case )
self.assertListEqual(snake_case , snake_case )
@slow
def lowerCamelCase_ ( self ) -> Optional[int]:
# fmt: off
_UpperCAmelCase = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # moussaKam/mbarthez is a French model, so we also use French texts.
_UpperCAmelCase = [
'Le transformeur est un modรจle d\'apprentissage profond introduit en 2017, '
'utilisรฉ principalement dans le domaine du traitement automatique des langues (TAL).',
'ร l\'instar des rรฉseaux de neurones rรฉcurrents (RNN), les transformeurs sont conรงus '
'pour gรฉrer des donnรฉes sรฉquentielles, telles que le langage naturel, pour des tรขches '
'telles que la traduction et la synthรจse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=snake_case , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=snake_case , )
| 24 | 0 |
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
lowercase = ['''small''', '''medium''', '''large''']
lowercase = '''lm_head.decoder.weight'''
lowercase = '''lm_head.weight'''
def UpperCAmelCase ( A : str , A : str ):
'''simple docstring'''
_UpperCAmelCase = torch.load(A )
_UpperCAmelCase = d.pop(A )
os.makedirs(A , exist_ok=A )
torch.save(A , os.path.join(A , A ) )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
parser.add_argument('''--dialogpt_path''', default='''.''', type=str)
lowercase = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
lowercase = os.path.join(args.dialogpt_path, F'''{MODEL}_ft.pkl''')
lowercase = F'''./DialoGPT-{MODEL}'''
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
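# Note (added for illustration): each fine-tuned DialoGPT pickle stores the LM
# head under 'lm_head.decoder.weight'; popping that key and saving the tensor
# back as 'lm_head.weight' is what lets the checkpoint load into transformers'
# GPT-2 language-model head.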
| 710 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowercase__ ( A, unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = DiTPipeline
_UpperCAmelCase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
_UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
'''latents''',
'''num_images_per_prompt''',
'''callback''',
'''callback_steps''',
}
_UpperCAmelCase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
_UpperCAmelCase = False
def lowerCamelCase_ ( self ) -> str:
torch.manual_seed(0 )
_UpperCAmelCase = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=snake_case , activation_fn='gelu-approximate' , num_embeds_ada_norm=1000 , norm_type='ada_norm_zero' , norm_elementwise_affine=snake_case , )
_UpperCAmelCase = AutoencoderKL()
_UpperCAmelCase = DDIMScheduler()
_UpperCAmelCase = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
return components
def lowerCamelCase_ ( self , snake_case , snake_case=0 ) -> Optional[Any]:
if str(snake_case ).startswith('mps' ):
_UpperCAmelCase = torch.manual_seed(snake_case )
else:
_UpperCAmelCase = torch.Generator(device=snake_case ).manual_seed(snake_case )
_UpperCAmelCase = {
'class_labels': [1],
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def lowerCamelCase_ ( self ) -> List[Any]:
_UpperCAmelCase = 'cpu'
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = self.pipeline_class(**snake_case )
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
_UpperCAmelCase = self.get_dummy_inputs(snake_case )
_UpperCAmelCase = pipe(**snake_case ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_UpperCAmelCase = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
_UpperCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(snake_case , 1E-3 )
def lowerCamelCase_ ( self ) -> Any:
self._test_inference_batch_single_identical(relax_max_difference=snake_case , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def lowerCamelCase_ ( self ) -> Optional[int]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase_ ( self ) -> int:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
pipe.to('cuda' )
_UpperCAmelCase = ['vase', 'umbrella', 'white shark', 'white wolf']
_UpperCAmelCase = pipe.get_label_ids(snake_case )
_UpperCAmelCase = pipe(snake_case , generator=snake_case , num_inference_steps=40 , output_type='np' ).images
for word, image in zip(snake_case , snake_case ):
_UpperCAmelCase = load_numpy(
f'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy' )
assert np.abs((expected_image - image).max() ) < 1E-2
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
_UpperCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('cuda' )
_UpperCAmelCase = ['vase', 'umbrella']
_UpperCAmelCase = pipe.get_label_ids(snake_case )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = pipe(snake_case , generator=snake_case , num_inference_steps=25 , output_type='np' ).images
for word, image in zip(snake_case , snake_case ):
_UpperCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
f'/dit/{word}_512.npy' )
assert np.abs((expected_image - image).max() ) < 1E-1
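# Usage sketch (added for illustration; assumes `diffusers` is installed and a
# CUDA GPU is available), mirroring the integration test above:
#   pipe = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256').to('cuda')
#   ids = pipe.get_label_ids(['white shark'])
#   image = pipe(ids, num_inference_steps=25, output_type='np').images[0]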
| 24 | 0 |
"""simple docstring"""
import string
from math import logaa
def UpperCAmelCase ( A : str , A : str ):
'''simple docstring'''
_UpperCAmelCase = document.translate(
str.maketrans('' , '' , string.punctuation ) ).replace('\n' , '' )
_UpperCAmelCase = document_without_punctuation.split(' ' ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def UpperCAmelCase ( A : str , A : str ):
'''simple docstring'''
_UpperCAmelCase = corpus.lower().translate(
str.maketrans('' , '' , string.punctuation ) ) # strip all punctuation and replace it with ''
_UpperCAmelCase = corpus_without_punctuation.split('\n' )
_UpperCAmelCase = term.lower()
return (len([doc for doc in docs if term in doc] ), len(A ))
def UpperCAmelCase ( A : int , A : int , A : Union[str, Any]=False ):
'''simple docstring'''
if smoothing:
if n == 0:
raise ValueError('log10(0) is undefined.' )
return round(1 + logaa(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError('df must be > 0' )
elif n == 0:
raise ValueError('log10(0) is undefined.' )
return round(logaa(n / df ) , 3 )
def UpperCAmelCase ( A : int , A : int ):
'''simple docstring'''
return round(tf * idf , 3 )
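if __name__ == "__main__":
    # Self-contained sketch (added for illustration, with hypothetical local
    # names) of the tf-idf pipeline the three helpers above implement.
    from math import log10

    docs = ["the cat sat", "the dog sat", "the cat ran"]
    term = "cat"
    tf = docs[0].split().count(term)  # term frequency in the first doc -> 1
    df = sum(term in d.split() for d in docs)  # document frequency -> 2
    idf = round(log10(len(docs) / df), 3)  # inverse document frequency -> 0.176
    print(round(tf * idf, 3))  # tf-idf score -> 0.176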
| 711 |
"""simple docstring"""
def UpperCAmelCase ( A : int ):
'''simple docstring'''
_UpperCAmelCase = abs(A )
_UpperCAmelCase = 0
while n > 0:
res += n % 10
n //= 10
return res
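# Worked check (added for illustration): for n = 262144 the loop above peels
# off the digits 4, 4, 1, 2, 6, 2 and returns 19.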
def UpperCAmelCase ( A : int ):
'''simple docstring'''
_UpperCAmelCase = abs(A )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def UpperCAmelCase ( A : int ):
'''simple docstring'''
return sum(int(A ) for c in str(abs(A ) ) )
def UpperCAmelCase ( ):
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(A : Callable , A : int ) -> None:
_UpperCAmelCase = f'{func.__name__}({value})'
_UpperCAmelCase = timeit(f'__main__.{call}' , setup='import __main__' )
print(f'{call:56} = {func(A )} -- {timing:.4f} seconds' )
for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(A , A )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 24 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 712 |
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase ( A : int , A : int ):
'''simple docstring'''
_UpperCAmelCase = []
create_all_state(1 , A , A , [] , A )
return result
def UpperCAmelCase ( A : int , A : int , A : int , A : list[int] , A : list[list[int]] , ):
'''simple docstring'''
if level == 0:
total_list.append(current_list[:] )
return
for i in range(A , total_number - level + 2 ):
current_list.append(A )
create_all_state(i + 1 , A , level - 1 , A , A )
current_list.pop()
def UpperCAmelCase ( A : list[list[int]] ):
'''simple docstring'''
for i in total_list:
print(*A )
if __name__ == "__main__":
lowercase = 4
lowercase = 2
lowercase = generate_all_combinations(n, k)
print_all_state(total_list)
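    # Worked check (added for illustration): n=4, k=2 prints all C(4, 2) = 6
    # combinations: 1 2 / 1 3 / 1 4 / 2 3 / 2 4 / 3 4.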
| 24 | 0 |
"""simple docstring"""
import flax.linen as nn
import jax
import jax.numpy as jnp
class lowercase__ ( nn.Module ):
'''simple docstring'''
_UpperCAmelCase = 42
_UpperCAmelCase = jnp.floataa
def lowerCamelCase_ ( self ) -> Tuple:
_UpperCAmelCase = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , snake_case ) -> Optional[Any]:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = hidden_states.shape
_UpperCAmelCase = jax.image.resize(
snake_case , shape=(batch, height * 2, width * 2, channels) , method='nearest' , )
_UpperCAmelCase = self.conv(snake_case )
return hidden_states
class lowercase__ ( nn.Module ):
'''simple docstring'''
_UpperCAmelCase = 42
_UpperCAmelCase = jnp.floataa
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , snake_case ) -> Tuple:
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
_UpperCAmelCase = self.conv(snake_case )
return hidden_states
class lowercase__ ( nn.Module ):
'''simple docstring'''
_UpperCAmelCase = 42
_UpperCAmelCase = None
_UpperCAmelCase = 0.0
_UpperCAmelCase = None
_UpperCAmelCase = jnp.floataa
def lowerCamelCase_ ( self ) -> List[Any]:
_UpperCAmelCase = self.in_channels if self.out_channels is None else self.out_channels
_UpperCAmelCase = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
_UpperCAmelCase = nn.Conv(
snake_case , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
_UpperCAmelCase = nn.Dense(snake_case , dtype=self.dtype )
_UpperCAmelCase = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
_UpperCAmelCase = nn.Dropout(self.dropout_prob )
_UpperCAmelCase = nn.Conv(
snake_case , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
_UpperCAmelCase = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
_UpperCAmelCase = None
if use_nin_shortcut:
_UpperCAmelCase = nn.Conv(
snake_case , kernel_size=(1, 1) , strides=(1, 1) , padding='VALID' , dtype=self.dtype , )
def __call__( self , snake_case , snake_case , snake_case=True ) -> Union[str, Any]:
_UpperCAmelCase = hidden_states
_UpperCAmelCase = self.norma(snake_case )
_UpperCAmelCase = nn.swish(snake_case )
_UpperCAmelCase = self.conva(snake_case )
_UpperCAmelCase = self.time_emb_proj(nn.swish(snake_case ) )
_UpperCAmelCase = jnp.expand_dims(jnp.expand_dims(snake_case , 1 ) , 1 )
_UpperCAmelCase = hidden_states + temb
_UpperCAmelCase = self.norma(snake_case )
_UpperCAmelCase = nn.swish(snake_case )
_UpperCAmelCase = self.dropout(snake_case , snake_case )
_UpperCAmelCase = self.conva(snake_case )
if self.conv_shortcut is not None:
_UpperCAmelCase = self.conv_shortcut(snake_case )
return hidden_states + residual
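# Init/apply sketch (added for illustration, using the upstream diffusers names
# since the field names above are style-transformed placeholders):
#   block = FlaxResnetBlock2D(in_channels=32, out_channels=32)
#   x, t = jnp.ones((1, 8, 8, 32)), jnp.ones((1, 128))  # NHWC input + time embedding
#   params = block.init(jax.random.PRNGKey(0), x, t)
#   y = block.apply(params, x, t)  # same NHWC shape out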
| 713 |
"""simple docstring"""
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
lowercase = logging.getLogger()
def UpperCAmelCase ( A : Path , A : list ):
'''simple docstring'''
_UpperCAmelCase = '\n'.join(A )
Path(A ).open('w' ).writelines(A )
lowercase = '''patrickvonplaten/t5-tiny-random'''
lowercase = '''sshleifer/bart-tiny-random'''
lowercase = '''sshleifer/tiny-mbart'''
lowercase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class lowercase__ ( A ):
'''simple docstring'''
def lowerCamelCase_ ( self , snake_case ) -> str:
_UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
_UpperCAmelCase = input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
_UpperCAmelCase = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.']
_dump_articles(snake_case , snake_case )
_UpperCAmelCase = str(Path(self.get_auto_remove_tmp_dir() ) / 'scores.json' )
_UpperCAmelCase = 'translation_en_to_de' if model == T5_TINY else 'summarization'
_UpperCAmelCase = f'\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n '.split()
with patch.object(snake_case , 'argv' , snake_case ):
run_generate()
assert Path(snake_case ).exists()
# os.remove(Path(output_file_name))
def lowerCamelCase_ ( self ) -> str:
self.run_eval_tester(snake_case )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def lowerCamelCase_ ( self , snake_case ) -> List[Any]:
self.run_eval_tester(snake_case )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def lowerCamelCase_ ( self , snake_case ) -> Dict:
_UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
_UpperCAmelCase = input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
_UpperCAmelCase = {
'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'],
'de': [
'Maschinelles Lernen ist groรartig, oder?',
'Ich esse gerne Bananen',
'Morgen ist wieder ein toller Tag!',
],
}
_UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() )
_UpperCAmelCase = str(tmp_dir / 'scores.json' )
_UpperCAmelCase = str(tmp_dir / 'val.target' )
_dump_articles(snake_case , text['en'] )
_dump_articles(snake_case , text['de'] )
_UpperCAmelCase = 'translation_en_to_de' if model == T5_TINY else 'summarization'
_UpperCAmelCase = f'\n run_eval_search.py\n {model}\n {str(snake_case )}\n {str(snake_case )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n '.split()
testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'] )
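        # The --search grid is colon-separated per hyperparameter, so this sweep
        # evaluates all 4 combinations of num_beams in {1, 2} and
        # length_penalty in {0.9, 1.0}.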
with patch.object(snake_case , 'argv' , snake_case ):
with CaptureStdout() as cs:
run_search()
_UpperCAmelCase = [' num_beams | length_penalty', model, 'Best score args']
_UpperCAmelCase = ['Info']
if "translation" in task:
expected_strings.append('bleu' )
else:
expected_strings.extend(snake_case )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(snake_case ).exists()
os.remove(Path(snake_case ) )
| 24 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowercase = logging.get_logger(__name__)
lowercase = {
'''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class lowercase__ ( A, A ):
'''simple docstring'''
_UpperCAmelCase = '''nat'''
_UpperCAmelCase = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , snake_case=4 , snake_case=3 , snake_case=64 , snake_case=[3, 4, 6, 5] , snake_case=[2, 4, 8, 16] , snake_case=7 , snake_case=3.0 , snake_case=True , snake_case=0.0 , snake_case=0.0 , snake_case=0.1 , snake_case="gelu" , snake_case=0.02 , snake_case=1E-5 , snake_case=0.0 , snake_case=None , snake_case=None , **snake_case , ) -> List[str]:
super().__init__(**snake_case )
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = embed_dim
_UpperCAmelCase = depths
_UpperCAmelCase = len(snake_case )
_UpperCAmelCase = num_heads
_UpperCAmelCase = kernel_size
_UpperCAmelCase = mlp_ratio
_UpperCAmelCase = qkv_bias
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = hidden_act
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_UpperCAmelCase = int(embed_dim * 2 ** (len(snake_case ) - 1) )
_UpperCAmelCase = layer_scale_init_value
_UpperCAmelCase = ['stem'] + [f'stage{idx}' for idx in range(1 , len(snake_case ) + 1 )]
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(
out_features=snake_case , out_indices=snake_case , stage_names=self.stage_names )
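        # Worked check (added for illustration): with the defaults above this
        # gives hidden_size = int(64 * 2 ** (4 - 1)) = 512, the channel
        # dimension after the last NAT stage.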
| 714 |
"""simple docstring"""
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
lowercase = logging.get_logger(__name__)
lowercase = TypeVar('''DatasetType''', Dataset, IterableDataset)
def UpperCAmelCase ( A : List[DatasetType] , A : Optional[List[float]] = None , A : Optional[int] = None , A : Optional[DatasetInfo] = None , A : Optional[NamedSplit] = None , A : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ):
'''simple docstring'''
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('Unable to interleave an empty list of datasets.' )
for i, dataset in enumerate(A ):
if not isinstance(A , (Dataset, IterableDataset) ):
if isinstance(A , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
'is an empty dataset dictionary.' )
raise ValueError(
f'Dataset at position {i} has at least one split: {list(A )}\n'
f'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(A ) )}\']' )
raise ValueError(
f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.' )
if i == 0:
_UpperCAmelCase , _UpperCAmelCase = (
(Dataset, IterableDataset) if isinstance(A , A ) else (IterableDataset, Dataset)
)
elif not isinstance(A , A ):
raise ValueError(
f'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(f'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
A , A , A , info=A , split=A , stopping_strategy=A )
else:
return _interleave_iterable_datasets(
A , A , A , info=A , split=A , stopping_strategy=A )
def UpperCAmelCase ( A : List[DatasetType] , A : Optional[DatasetInfo] = None , A : Optional[NamedSplit] = None , A : int = 0 , ):
'''simple docstring'''
if not dsets:
raise ValueError('Unable to concatenate an empty list of datasets.' )
for i, dataset in enumerate(A ):
if not isinstance(A , (Dataset, IterableDataset) ):
if isinstance(A , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
'is an empty dataset dictionary.' )
raise ValueError(
f'Dataset at position {i} has at least one split: {list(A )}\n'
f'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(A ) )}\']' )
raise ValueError(
f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.' )
if i == 0:
_UpperCAmelCase , _UpperCAmelCase = (
(Dataset, IterableDataset) if isinstance(A , A ) else (IterableDataset, Dataset)
)
elif not isinstance(A , A ):
raise ValueError(
f'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(A , info=A , split=A , axis=A )
else:
return _concatenate_iterable_datasets(A , info=A , split=A , axis=A )
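# Usage sketch (added for illustration, shown with the upstream `datasets`
# names since both helpers above share one style-transformed name):
#   from datasets import Dataset, interleave_datasets
#   d1 = Dataset.from_dict({"a": [0, 1, 2]})
#   d2 = Dataset.from_dict({"a": [10, 11]})
#   interleave_datasets([d1, d2])["a"]  # [0, 10, 1, 11]  (first_exhausted)
#   interleave_datasets([d1, d2], stopping_strategy="all_exhausted")["a"]
#   # [0, 10, 1, 11, 2, 10]  - the shorter dataset is reused until all finish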
| 24 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
lowercase = logging.get_logger(__name__)
def UpperCAmelCase ( A : List[str] ):
'''simple docstring'''
_UpperCAmelCase = SwinConfig(
embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['stage2', 'stage3', 'stage4'] , )
_UpperCAmelCase = DetaConfig(
backbone_config=A , num_queries=900 , encoder_ffn_dim=2048 , decoder_ffn_dim=2048 , num_feature_levels=5 , assign_first_stage=A , with_box_refine=A , two_stage=A , )
# set labels
_UpperCAmelCase = 'huggingface/label-files'
if "o365" in model_name:
_UpperCAmelCase = 366
_UpperCAmelCase = 'object365-id2label.json'
else:
_UpperCAmelCase = 91
_UpperCAmelCase = 'coco-detection-id2label.json'
_UpperCAmelCase = num_labels
_UpperCAmelCase = json.load(open(cached_download(hf_hub_url(A , A , repo_type='dataset' ) ) , 'r' ) )
_UpperCAmelCase = {int(A ): v for k, v in idalabel.items()}
_UpperCAmelCase = idalabel
_UpperCAmelCase = {v: k for k, v in idalabel.items()}
return config
def UpperCAmelCase ( A : Optional[Any] ):
'''simple docstring'''
_UpperCAmelCase = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') )
rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.reduction.weight', f'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.weight', f'model.backbone.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.bias', f'model.backbone.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') )
rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') )
rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') )
rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') )
rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') )
rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', f'model.encoder.layers.{i}.self_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', f'model.encoder.layers.{i}.self_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', f'model.encoder.layers.{i}.self_attn.attention_weights.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', f'model.encoder.layers.{i}.self_attn.attention_weights.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.weight', f'model.encoder.layers.{i}.self_attn.value_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.bias', f'model.encoder.layers.{i}.self_attn.value_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.weight', f'model.encoder.layers.{i}.self_attn.output_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.bias', f'model.encoder.layers.{i}.self_attn.output_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.weight', f'model.encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'model.encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'model.encoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'model.encoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'model.encoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'model.encoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'model.encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'model.encoder.layers.{i}.final_layer_norm.bias') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', f'model.decoder.layers.{i}.encoder_attn.attention_weights.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', f'model.decoder.layers.{i}.encoder_attn.attention_weights.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', f'model.decoder.layers.{i}.encoder_attn.value_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', f'model.decoder.layers.{i}.encoder_attn.value_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', f'model.decoder.layers.{i}.encoder_attn.output_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', f'model.decoder.layers.{i}.encoder_attn.output_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.weight', f'model.decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'model.decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'model.decoder.layers.{i}.self_attn.out_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'model.decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.weight', f'model.decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.bias', f'model.decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'model.decoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'model.decoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'model.decoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'model.decoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'model.decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'model.decoder.layers.{i}.final_layer_norm.bias') )
# fmt: on
return rename_keys
def UpperCAmelCase ( A : Any , A : List[Any] , A : List[Any] ):
'''simple docstring'''
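# Move the tensor stored under the source key to the destination key of the state dict.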
_UpperCAmelCase = dct.pop(A )
_UpperCAmelCase = val
def UpperCAmelCase ( A : Any , A : List[str] ):
'''simple docstring'''
_UpperCAmelCase = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_UpperCAmelCase = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_UpperCAmelCase = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight' )
_UpperCAmelCase = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
_UpperCAmelCase = in_proj_weight[:dim, :]
_UpperCAmelCase = in_proj_bias[: dim]
_UpperCAmelCase = in_proj_weight[
dim : dim * 2, :
]
_UpperCAmelCase = in_proj_bias[
dim : dim * 2
]
_UpperCAmelCase = in_proj_weight[
-dim :, :
]
_UpperCAmelCase = in_proj_bias[-dim :]
# fmt: on
def UpperCAmelCase ( A : Optional[int] , A : int ):
'''simple docstring'''
_UpperCAmelCase = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
_UpperCAmelCase = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
_UpperCAmelCase = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
_UpperCAmelCase = in_proj_weight[:hidden_size, :]
_UpperCAmelCase = in_proj_bias[:hidden_size]
_UpperCAmelCase = in_proj_weight[
hidden_size : hidden_size * 2, :
]
_UpperCAmelCase = in_proj_bias[hidden_size : hidden_size * 2]
_UpperCAmelCase = in_proj_weight[-hidden_size:, :]
_UpperCAmelCase = in_proj_bias[-hidden_size:]
def UpperCAmelCase ( ):
'''simple docstring'''
_UpperCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_UpperCAmelCase = Image.open(requests.get(A , stream=A ).raw )
return im
@torch.no_grad()
def UpperCAmelCase ( A : Dict , A : List[str] , A : List[str] ):
'''simple docstring'''
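# Flow: fetch the original checkpoint, rename keys, split the fused qkv projections,
# load into DetaForObjectDetection, then sanity-check logits/boxes on a COCO image.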
_UpperCAmelCase = get_deta_config(A )
# load original state dict
if model_name == "deta-swin-large":
_UpperCAmelCase = hf_hub_download(repo_id='nielsr/deta-checkpoints' , filename='adet_swin_ft.pth' )
elif model_name == "deta-swin-large-o365":
_UpperCAmelCase = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365' , filename='deta_swin_pt_o365.pth' )
else:
raise ValueError(f'Model name {model_name} not supported' )
_UpperCAmelCase = torch.load(A , map_location='cpu' )['model']
# original state dict
for name, param in state_dict.items():
print(A , param.shape )
# rename keys
_UpperCAmelCase = create_rename_keys(A )
for src, dest in rename_keys:
rename_key(A , A , A )
read_in_swin_q_k_v(A , config.backbone_config )
read_in_decoder_q_k_v(A , A )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
_UpperCAmelCase = state_dict.pop(A )
_UpperCAmelCase = val
if "input_proj" in key:
_UpperCAmelCase = state_dict.pop(A )
_UpperCAmelCase = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
_UpperCAmelCase = state_dict.pop(A )
_UpperCAmelCase = val
# finally, create HuggingFace model and load state dict
_UpperCAmelCase = DetaForObjectDetection(A )
model.load_state_dict(A )
model.eval()
_UpperCAmelCase = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(A )
# load image processor
_UpperCAmelCase = DetaImageProcessor(format='coco_detection' )
# verify our conversion on image
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = processor(images=A , return_tensors='pt' )
_UpperCAmelCase = encoding['pixel_values']
_UpperCAmelCase = model(pixel_values.to(A ) )
# verify logits
print('Logits:' , outputs.logits[0, :3, :3] )
print('Boxes:' , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
_UpperCAmelCase = torch.tensor(
[[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] )
_UpperCAmelCase = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] )
elif model_name == "deta-swin-large-o365":
_UpperCAmelCase = torch.tensor(
[[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] )
_UpperCAmelCase = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(A ) , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(A ) , atol=1e-4 )
print('Everything ok!' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(f'Saving PyTorch model and processor to {pytorch_dump_folder_path}...' )
Path(A ).mkdir(exist_ok=A )
model.save_pretrained(A )
processor.save_pretrained(A )
# Push to hub
if push_to_hub:
print('Pushing model and processor to hub...' )
model.push_to_hub(f'jozhang97/{model_name}' )
processor.push_to_hub(f'jozhang97/{model_name}' )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowercase = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 715 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_UpperCAmelCase = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Dict:
_UpperCAmelCase = TextaTextGenerationPipeline(model=snake_case , tokenizer=snake_case )
return generator, ["Something to write", "Something else"]
def lowerCamelCase_ ( self , snake_case , snake_case ) -> Dict:
_UpperCAmelCase = generator('Something there' )
self.assertEqual(snake_case , [{'generated_text': ANY(snake_case )}] )
# These are encoder-decoder models; they don't just append to the incoming string
self.assertFalse(outputs[0]['generated_text'].startswith('Something there' ) )
_UpperCAmelCase = generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=snake_case )
self.assertEqual(
snake_case , [
[{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}],
[{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}],
] , )
_UpperCAmelCase = generator(
['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=snake_case )
self.assertEqual(
snake_case , [
[{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}],
[{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}],
] , )
with self.assertRaises(snake_case ):
generator(4 )
@require_torch
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='pt' )
# do_sample=False necessary for reproducibility
_UpperCAmelCase = generator('Something there' , do_sample=snake_case )
self.assertEqual(snake_case , [{'generated_text': ''}] )
_UpperCAmelCase = 3
_UpperCAmelCase = generator(
'Something there' , num_return_sequences=snake_case , num_beams=snake_case , )
_UpperCAmelCase = [
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': ''},
]
self.assertEqual(snake_case , snake_case )
_UpperCAmelCase = generator('This is a test' , do_sample=snake_case , num_return_sequences=2 , return_tensors=snake_case )
self.assertEqual(
snake_case , [
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
] , )
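# Batched inputs need padding: point the tokenizer's pad token at the model's EOS id.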
_UpperCAmelCase = generator.model.config.eos_token_id
_UpperCAmelCase = '<pad>'
_UpperCAmelCase = generator(
['This is a test', 'This is a second test'] , do_sample=snake_case , num_return_sequences=2 , batch_size=2 , return_tensors=snake_case , )
self.assertEqual(
snake_case , [
[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
],
[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
],
] , )
@require_tf
def lowerCamelCase_ ( self ) -> Any:
_UpperCAmelCase = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='tf' )
# do_sample=False necessary for reproducibility
_UpperCAmelCase = generator('Something there' , do_sample=snake_case )
self.assertEqual(snake_case , [{'generated_text': ''}] )
| 24 | 0 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowercase__ :
'''simple docstring'''
def __init__( self , snake_case , snake_case=2 , snake_case=True , snake_case=False , snake_case=10 , snake_case=3 , snake_case=32 * 4 , snake_case=32 * 6 , snake_case=4 , snake_case=32 , ) -> Optional[Any]:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = is_training
_UpperCAmelCase = use_auxiliary_loss
_UpperCAmelCase = num_queries
_UpperCAmelCase = num_channels
_UpperCAmelCase = min_size
_UpperCAmelCase = max_size
_UpperCAmelCase = num_labels
_UpperCAmelCase = mask_feature_size
def lowerCamelCase_ ( self ) -> List[str]:
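# Random pixel values, an all-ones pixel mask, and binary instance masks/labels.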
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
snake_case )
_UpperCAmelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=snake_case )
_UpperCAmelCase = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=snake_case ) > 0.5
).float()
_UpperCAmelCase = (torch.rand((self.batch_size, self.num_labels) , device=snake_case ) > 0.5).long()
_UpperCAmelCase = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowerCamelCase_ ( self ) -> Dict:
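# Tiny Swin backbone and a 2-head DETR decoder keep these common tests cheap.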
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def lowerCamelCase_ ( self , snake_case , snake_case ) -> Optional[Any]:
_UpperCAmelCase = output.encoder_hidden_states
_UpperCAmelCase = output.pixel_decoder_hidden_states
_UpperCAmelCase = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(snake_case ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(snake_case ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(snake_case ) , config.decoder_config.decoder_layers )
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case=False ) -> int:
with torch.no_grad():
_UpperCAmelCase = MaskFormerModel(config=snake_case )
model.to(snake_case )
model.eval()
_UpperCAmelCase = model(pixel_values=snake_case , pixel_mask=snake_case )
_UpperCAmelCase = model(snake_case , output_hidden_states=snake_case )
# the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(snake_case , snake_case )
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case ) -> str:
_UpperCAmelCase = MaskFormerForInstanceSegmentation(config=snake_case )
model.to(snake_case )
model.eval()
def comm_check_on_output(snake_case ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_UpperCAmelCase = model(pixel_values=snake_case , pixel_mask=snake_case )
_UpperCAmelCase = model(snake_case )
comm_check_on_output(snake_case )
_UpperCAmelCase = model(
pixel_values=snake_case , pixel_mask=snake_case , mask_labels=snake_case , class_labels=snake_case )
comm_check_on_output(snake_case )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class lowercase__ ( A, A, unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
_UpperCAmelCase = (
{'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def lowerCamelCase_ ( self ) -> Any:
_UpperCAmelCase = MaskFormerModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case )
def lowerCamelCase_ ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self ) -> Any:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(snake_case , **snake_case , output_hidden_states=snake_case )
def lowerCamelCase_ ( self ) -> List[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*snake_case )
@unittest.skip(reason='MaskFormer does not use inputs_embeds' )
def lowerCamelCase_ ( self ) -> str:
pass
@unittest.skip(reason='MaskFormer does not have a get_input_embeddings method' )
def lowerCamelCase_ ( self ) -> str:
pass
@unittest.skip(reason='MaskFormer is not a generative model' )
def lowerCamelCase_ ( self ) -> Any:
pass
@unittest.skip(reason='MaskFormer does not use token embeddings' )
def lowerCamelCase_ ( self ) -> Tuple:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def lowerCamelCase_ ( self ) -> List[Any]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
pass
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(snake_case )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case )
@slow
def lowerCamelCase_ ( self ) -> Optional[Any]:
for model_name in ["facebook/maskformer-swin-small-coco"]:
_UpperCAmelCase = MaskFormerModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def lowerCamelCase_ ( self ) -> Optional[Any]:
_UpperCAmelCase = (self.model_tester.min_size,) * 2
_UpperCAmelCase = {
'pixel_values': torch.randn((2, 3, *size) , device=snake_case ),
'mask_labels': torch.randn((2, 10, *size) , device=snake_case ),
'class_labels': torch.zeros(2 , 10 , device=snake_case ).long(),
}
_UpperCAmelCase = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(snake_case )
_UpperCAmelCase = model(**snake_case )
self.assertTrue(outputs.loss is not None )
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(snake_case , **snake_case , output_hidden_states=snake_case )
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(snake_case ).to(snake_case )
_UpperCAmelCase = model(**snake_case , output_attentions=snake_case )
self.assertTrue(outputs.attentions is not None )
def lowerCamelCase_ ( self ) -> Any:
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
_UpperCAmelCase = self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase = model_class(snake_case )
model.to(snake_case )
model.train()
_UpperCAmelCase = model(snake_case , mask_labels=snake_case , class_labels=snake_case ).loss
loss.backward()
def lowerCamelCase_ ( self ) -> Dict:
# only MaskFormerForInstanceSegmentation has the loss
_UpperCAmelCase = self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = model_class(snake_case )
model.to(snake_case )
model.train()
_UpperCAmelCase = model(snake_case , mask_labels=snake_case , class_labels=snake_case )
_UpperCAmelCase = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_UpperCAmelCase = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
_UpperCAmelCase = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_UpperCAmelCase = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=snake_case )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowercase = 1E-4
def UpperCAmelCase ( ):
'''simple docstring'''
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCamelCase_ ( self ) -> List[Any]:
return (
MaskFormerImageProcessor.from_pretrained('facebook/maskformer-swin-small-coco' )
if is_vision_available()
else None
)
def lowerCamelCase_ ( self ) -> Tuple:
_UpperCAmelCase = MaskFormerModel.from_pretrained('facebook/maskformer-swin-small-coco' ).to(snake_case )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(snake_case , return_tensors='pt' ).to(snake_case )
_UpperCAmelCase = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(snake_case , (1, 3, 800, 1088) )
with torch.no_grad():
_UpperCAmelCase = model(**snake_case )
_UpperCAmelCase = torch.tensor(
[[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(snake_case )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , snake_case , atol=snake_case ) )
_UpperCAmelCase = torch.tensor(
[[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(snake_case )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , snake_case , atol=snake_case ) )
_UpperCAmelCase = torch.tensor(
[[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(snake_case )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , snake_case , atol=snake_case ) )
def lowerCamelCase_ ( self ) -> Tuple:
_UpperCAmelCase = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' )
.to(snake_case )
.eval()
)
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(snake_case , return_tensors='pt' ).to(snake_case )
_UpperCAmelCase = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(snake_case , (1, 3, 800, 1088) )
with torch.no_grad():
_UpperCAmelCase = model(**snake_case )
# masks_queries_logits
_UpperCAmelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_UpperCAmelCase = [
[-1.3737124, -1.7724937, -1.9364233],
[-1.5977281, -1.9867939, -2.1523695],
[-1.5795398, -1.9269832, -2.093942],
]
_UpperCAmelCase = torch.tensor(snake_case ).to(snake_case )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , snake_case , atol=snake_case ) )
# class_queries_logits
_UpperCAmelCase = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_UpperCAmelCase = torch.tensor(
[
[1.65_12E00, -5.25_72E00, -3.35_19E00],
[3.61_69E-02, -5.90_25E00, -2.93_13E00],
[1.07_66E-04, -7.76_30E00, -5.12_63E00],
] ).to(snake_case )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case , atol=snake_case ) )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-resnet101-coco-stuff' )
.to(snake_case )
.eval()
)
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(snake_case , return_tensors='pt' ).to(snake_case )
_UpperCAmelCase = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(snake_case , (1, 3, 800, 1088) )
with torch.no_grad():
_UpperCAmelCase = model(**snake_case )
# masks_queries_logits
_UpperCAmelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_UpperCAmelCase = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
_UpperCAmelCase = torch.tensor(snake_case ).to(snake_case )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , snake_case , atol=snake_case ) )
# class_queries_logits
_UpperCAmelCase = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_UpperCAmelCase = torch.tensor(
[[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(snake_case )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case , atol=snake_case ) )
def lowerCamelCase_ ( self ) -> Tuple:
_UpperCAmelCase = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' )
.to(snake_case )
.eval()
)
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='pt' , )
_UpperCAmelCase = inputs['pixel_values'].to(snake_case )
_UpperCAmelCase = [el.to(snake_case ) for el in inputs['mask_labels']]
_UpperCAmelCase = [el.to(snake_case ) for el in inputs['class_labels']]
with torch.no_grad():
_UpperCAmelCase = model(**snake_case )
self.assertTrue(outputs.loss is not None )
| 716 |
"""simple docstring"""
def UpperCAmelCase ( A : int ):
'''simple docstring'''
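# Bottom-up partition DP: memo[n][k] adds the ways that avoid part size k (memo[n][k - 1])
# to the ways that use it at least once (memo[n - k - 1][k]).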
_UpperCAmelCase = [[0 for _ in range(A )] for _ in range(m + 1 )]
for i in range(m + 1 ):
_UpperCAmelCase = 1
for n in range(m + 1 ):
for k in range(1 , A ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
lowercase = int(input('''Enter a number: ''').strip())
print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
lowercase = int(sys.argv[1])
print(partition(n))
except ValueError:
print('''Please pass a number.''')
| 24 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = {
'task_specific_params': {
'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4},
'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4},
'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6},
}
}
_UpperCAmelCase = {
'task_specific_params.summarization.length_penalty': 1.0,
'task_specific_params.summarization.max_length': 128,
'task_specific_params.summarization.min_length': 12,
'task_specific_params.summarization.num_beams': 4,
'task_specific_params.summarization_cnn.length_penalty': 2.0,
'task_specific_params.summarization_cnn.max_length': 142,
'task_specific_params.summarization_cnn.min_length': 56,
'task_specific_params.summarization_cnn.num_beams': 4,
'task_specific_params.summarization_xsum.length_penalty': 1.0,
'task_specific_params.summarization_xsum.max_length': 62,
'task_specific_params.summarization_xsum.min_length': 11,
'task_specific_params.summarization_xsum.num_beams': 6,
}
self.assertEqual(flatten_dict(snake_case ) , snake_case )
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(snake_case ) , x.transpose() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(transpose(snake_case ) , transpose(snake_case ).numpy() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , transpose(snake_case , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(transpose(snake_case ) , transpose(snake_case ).numpy() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , transpose(snake_case , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(transpose(snake_case ) , np.asarray(transpose(snake_case ) ) ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , np.asarray(transpose(snake_case , axes=(1, 2, 0) ) ) ) )
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , np.reshape(snake_case , (4, 3) ) ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , np.reshape(snake_case , (12, 5) ) ) )
@require_torch
def lowerCamelCase_ ( self ) -> Optional[Any]:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , reshape(snake_case , (4, 3) ).numpy() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , reshape(snake_case , (12, 5) ).numpy() ) )
@require_tf
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , reshape(snake_case , (4, 3) ).numpy() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , reshape(snake_case , (12, 5) ).numpy() ) )
@require_flax
def lowerCamelCase_ ( self ) -> Tuple:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , np.asarray(reshape(snake_case , (4, 3) ) ) ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , np.asarray(reshape(snake_case , (12, 5) ) ) ) )
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(snake_case ) , np.squeeze(snake_case ) ) )
_UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , np.squeeze(snake_case , axis=2 ) ) )
@require_torch
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = np.random.randn(1 , 3 , 4 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(squeeze(snake_case ) , squeeze(snake_case ).numpy() ) )
_UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , squeeze(snake_case , axis=2 ).numpy() ) )
@require_tf
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = np.random.randn(1 , 3 , 4 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(squeeze(snake_case ) , squeeze(snake_case ).numpy() ) )
_UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , squeeze(snake_case , axis=2 ).numpy() ) )
@require_flax
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = np.random.randn(1 , 3 , 4 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(squeeze(snake_case ) , np.asarray(squeeze(snake_case ) ) ) )
_UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , np.asarray(squeeze(snake_case , axis=2 ) ) ) )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , np.expand_dims(snake_case , axis=1 ) ) )
@require_torch
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , expand_dims(snake_case , axis=1 ).numpy() ) )
@require_tf
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , expand_dims(snake_case , axis=1 ).numpy() ) )
@require_flax
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , np.asarray(expand_dims(snake_case , axis=1 ) ) ) )
| 717 |
"""simple docstring"""
import os
lowercase = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 1_00, '''D''': 5_00, '''M''': 10_00}
def UpperCAmelCase ( A : str ):
'''simple docstring'''
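# Scan left to right: a symbol smaller than its right neighbour is subtractive (IV, CM, ...).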
_UpperCAmelCase = 0
_UpperCAmelCase = 0
while index < len(A ) - 1:
_UpperCAmelCase = SYMBOLS[numerals[index]]
_UpperCAmelCase = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def UpperCAmelCase ( A : int ):
'''simple docstring'''
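# Greedy construction: thousands first, then hundreds, tens and units,
# peeling off the 9- and 4-valued subtractive forms before the additive ones.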
_UpperCAmelCase = ''
_UpperCAmelCase = num // 1000
numerals += m_count * "M"
num %= 1000
_UpperCAmelCase = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
_UpperCAmelCase = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def UpperCAmelCase ( A : str = "/p089_roman.txt" ):
'''simple docstring'''
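# Each saving is the length difference between the file's numeral and its minimal form.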
_UpperCAmelCase = 0
with open(os.path.dirname(A ) + roman_numerals_filename ) as filea:
_UpperCAmelCase = filea.readlines()
for line in lines:
_UpperCAmelCase = line.strip()
_UpperCAmelCase = parse_roman_numerals(A )
_UpperCAmelCase = generate_roman_numerals(A )
savings += len(A ) - len(A )
return savings
if __name__ == "__main__":
print(F'''{solution() = }''')
| 24 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
lowercase = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
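# Static type checkers see the real imports below; at runtime _LazyModule defers them
# until an attribute is first accessed.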
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 718 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = {
'task_specific_params': {
'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4},
'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4},
'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6},
}
}
_UpperCAmelCase = {
'task_specific_params.summarization.length_penalty': 1.0,
'task_specific_params.summarization.max_length': 128,
'task_specific_params.summarization.min_length': 12,
'task_specific_params.summarization.num_beams': 4,
'task_specific_params.summarization_cnn.length_penalty': 2.0,
'task_specific_params.summarization_cnn.max_length': 142,
'task_specific_params.summarization_cnn.min_length': 56,
'task_specific_params.summarization_cnn.num_beams': 4,
'task_specific_params.summarization_xsum.length_penalty': 1.0,
'task_specific_params.summarization_xsum.max_length': 62,
'task_specific_params.summarization_xsum.min_length': 11,
'task_specific_params.summarization_xsum.num_beams': 6,
}
self.assertEqual(flatten_dict(snake_case ) , snake_case )
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(snake_case ) , x.transpose() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(transpose(snake_case ) , transpose(snake_case ).numpy() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , transpose(snake_case , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(transpose(snake_case ) , transpose(snake_case ).numpy() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , transpose(snake_case , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(transpose(snake_case ) , np.asarray(transpose(snake_case ) ) ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , np.asarray(transpose(snake_case , axes=(1, 2, 0) ) ) ) )
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , np.reshape(snake_case , (4, 3) ) ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , np.reshape(snake_case , (12, 5) ) ) )
@require_torch
def lowerCamelCase_ ( self ) -> Optional[Any]:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , reshape(snake_case , (4, 3) ).numpy() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , reshape(snake_case , (12, 5) ).numpy() ) )
@require_tf
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , reshape(snake_case , (4, 3) ).numpy() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , reshape(snake_case , (12, 5) ).numpy() ) )
@require_flax
def lowerCamelCase_ ( self ) -> Tuple:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , np.asarray(reshape(snake_case , (4, 3) ) ) ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , np.asarray(reshape(snake_case , (12, 5) ) ) ) )
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(snake_case ) , np.squeeze(snake_case ) ) )
_UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , np.squeeze(snake_case , axis=2 ) ) )
@require_torch
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = np.random.randn(1 , 3 , 4 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(squeeze(snake_case ) , squeeze(snake_case ).numpy() ) )
_UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , squeeze(snake_case , axis=2 ).numpy() ) )
@require_tf
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = np.random.randn(1 , 3 , 4 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(squeeze(snake_case ) , squeeze(snake_case ).numpy() ) )
_UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , squeeze(snake_case , axis=2 ).numpy() ) )
@require_flax
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = np.random.randn(1 , 3 , 4 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(squeeze(snake_case ) , np.asarray(squeeze(snake_case ) ) ) )
_UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , np.asarray(squeeze(snake_case , axis=2 ) ) ) )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , np.expand_dims(snake_case , axis=1 ) ) )
@require_torch
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , expand_dims(snake_case , axis=1 ).numpy() ) )
@require_tf
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , expand_dims(snake_case , axis=1 ).numpy() ) )
@require_flax
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , np.asarray(expand_dims(snake_case , axis=1 ) ) ) )
| 24 | 0 |
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase__ ( A, unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = GPTSanJapaneseTokenizer
_UpperCAmelCase = False
_UpperCAmelCase = {'''do_clean_text''': False, '''add_prefix_space''': False}
def lowerCamelCase_ ( self ) -> List[str]:
super().setUp()
# fmt: off
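# '世界,㔺界' registers two written forms of one token; decoding always emits the first (世界).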
_UpperCAmelCase = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
# fmt: on
_UpperCAmelCase = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}} # 😀
_UpperCAmelCase = {'unk_token': '<unk>'}
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['emoji_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.emoji_file , 'w' ) as emoji_writer:
emoji_writer.write(json.dumps(snake_case ) )
def lowerCamelCase_ ( self , **snake_case ) -> Tuple:
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **snake_case )
def lowerCamelCase_ ( self , snake_case ) -> List[Any]:
_UpperCAmelCase = 'こんにちは、世界。 \nこんばんは、㔺界。😀'
_UpperCAmelCase = 'こんにちは、世界。 \nこんばんは、世界。😀'
return input_text, output_text
def lowerCamelCase_ ( self , snake_case ) -> List[str]:
_UpperCAmelCase , _UpperCAmelCase = self.get_input_output_texts(snake_case )
_UpperCAmelCase = tokenizer.encode(snake_case , add_special_tokens=snake_case )
_UpperCAmelCase = tokenizer.decode(snake_case , clean_up_tokenization_spaces=snake_case )
return text, ids
def lowerCamelCase_ ( self ) -> int:
pass # TODO add if relevant
def lowerCamelCase_ ( self ) -> Dict:
pass # TODO add if relevant
def lowerCamelCase_ ( self ) -> List[Any]:
pass # TODO add if relevant
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = self.get_tokenizer()
# Testing tokenization
_UpperCAmelCase = 'こんにちは、世界。 こんばんは、㔺界。'
_UpperCAmelCase = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。']
_UpperCAmelCase = tokenizer.tokenize(snake_case )
self.assertListEqual(snake_case , snake_case )
# Testing conversion to ids without special tokens
_UpperCAmelCase = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(snake_case )
self.assertListEqual(snake_case , snake_case )
# Testing conversion to ids with special tokens
_UpperCAmelCase = tokens + [tokenizer.unk_token]
_UpperCAmelCase = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(snake_case )
self.assertListEqual(snake_case , snake_case )
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = self.get_tokenizer()
# Testing tokenization
_UpperCAmelCase = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'
_UpperCAmelCase = 'こんにちは、、、、世界。こんばんは、、、、世界。'
_UpperCAmelCase = tokenizer.encode(snake_case )
_UpperCAmelCase = tokenizer.decode(snake_case )
self.assertEqual(snake_case , snake_case )
@slow
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
_UpperCAmelCase = 'こんにちは、世界。'
_UpperCAmelCase = 'こんばんは、㔺界。😀'
_UpperCAmelCase = 'こんにちは、世界。こんばんは、世界。😀'
_UpperCAmelCase = tokenizer.encode(prefix_text + input_text )
_UpperCAmelCase = tokenizer.encode('' , prefix_text=prefix_text + input_text )
_UpperCAmelCase = tokenizer.encode(snake_case , prefix_text=snake_case )
_UpperCAmelCase = tokenizer.decode(snake_case )
_UpperCAmelCase = tokenizer.decode(snake_case )
_UpperCAmelCase = tokenizer.decode(snake_case )
self.assertEqual(snake_case , snake_case )
self.assertEqual(snake_case , snake_case )
self.assertEqual(snake_case , snake_case )
@slow
def lowerCamelCase_ ( self ) -> Any:
_UpperCAmelCase = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
        _UpperCAmelCase = 'こんにちは、世界。'
        _UpperCAmelCase = 'こんばんは、㔺界。😀'
_UpperCAmelCase = len(tokenizer.encode(snake_case ) ) - 2
_UpperCAmelCase = len(tokenizer.encode(snake_case ) ) - 2
_UpperCAmelCase = [1] + [0] * (len_prefix + len_text + 1)
_UpperCAmelCase = [1] * (len_prefix + len_text + 1) + [0]
_UpperCAmelCase = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
_UpperCAmelCase = tokenizer(prefix_text + input_text ).token_type_ids
_UpperCAmelCase = tokenizer('' , prefix_text=prefix_text + input_text ).token_type_ids
_UpperCAmelCase = tokenizer(snake_case , prefix_text=snake_case ).token_type_ids
self.assertListEqual(snake_case , snake_case )
self.assertListEqual(snake_case , snake_case )
self.assertListEqual(snake_case , snake_case )
@slow
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
        _UpperCAmelCase = tokenizer.encode('あンいワ' )
        _UpperCAmelCase = tokenizer.encode('' , prefix_text='あンいワ' )
        _UpperCAmelCase = tokenizer.encode('いワ' , prefix_text='あン' )
self.assertEqual(tokenizer.decode(snake_case ) , tokenizer.decode(snake_case ) )
self.assertEqual(tokenizer.decode(snake_case ) , tokenizer.decode(snake_case ) )
self.assertNotEqual(snake_case , snake_case )
self.assertNotEqual(snake_case , snake_case )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
        _UpperCAmelCase = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']]
_UpperCAmelCase = tokenizer(snake_case , padding=snake_case )
_UpperCAmelCase = tokenizer.batch_encode_plus(snake_case , padding=snake_case )
# fmt: off
_UpperCAmelCase = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
_UpperCAmelCase = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
_UpperCAmelCase = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , snake_case )
self.assertListEqual(x_token.token_type_ids , snake_case )
self.assertListEqual(x_token.attention_mask , snake_case )
self.assertListEqual(x_token_a.input_ids , snake_case )
self.assertListEqual(x_token_a.token_type_ids , snake_case )
self.assertListEqual(x_token_a.attention_mask , snake_case )
def lowerCamelCase_ ( self ) -> List[str]:
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def lowerCamelCase_ ( self ) -> List[str]:
# tokenizer has no padding token
pass
| 719 |
"""simple docstring"""
import os
def solution():
    '''simple docstring'''
    file_path = os.path.join(os.path.dirname(__file__ ) , 'num.txt' )
    with open(file_path ) as file_hand:
        return str(sum(int(line ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
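# A hedged note on the data file: for Project Euler problem 13, num.txt is
# expected to hold one large integer per line (one hundred 50-digit numbers),
# and the [:10] slice above keeps only the first ten digits of their sum.
# For example, a num.txt containing "123\n456\n" would make solution() return '579'.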
| 24 | 0 |
"""simple docstring"""
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int , b: int , c: int ) -> tuple[complex, complex]:
    '''simple docstring'''
    if a == 0:
        raise ValueError('Coefficient \'a\' must not be zero.' )
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta )) / (2 * a)
    root_2 = (-b - sqrt(delta )) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )
def main():
    '''simple docstring'''
    solution_1 , solution_2 = quadratic_roots(a=5 , b=6 , c=1 )
    print(f'The solutions are: {solution_1} and {solution_2}' )
if __name__ == "__main__":
main()
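# A minimal sanity sketch, assuming quadratic_roots as defined above: a zero
# discriminant collapses to one repeated real root, and a negative discriminant
# yields a complex-conjugate pair (kept complex because .imag is nonzero).
assert quadratic_roots(a=1, b=-2, c=1) == (1.0, 1.0)
assert quadratic_roots(a=1, b=0, c=1) == (1j, -1j)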
| 720 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase = {
'''configuration_roberta''': ['''ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RobertaConfig''', '''RobertaOnnxConfig'''],
'''tokenization_roberta''': ['''RobertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''RobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaForCausalLM''',
'''RobertaForMaskedLM''',
'''RobertaForMultipleChoice''',
'''RobertaForQuestionAnswering''',
'''RobertaForSequenceClassification''',
'''RobertaForTokenClassification''',
'''RobertaModel''',
'''RobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaForCausalLM''',
'''TFRobertaForMaskedLM''',
'''TFRobertaForMultipleChoice''',
'''TFRobertaForQuestionAnswering''',
'''TFRobertaForSequenceClassification''',
'''TFRobertaForTokenClassification''',
'''TFRobertaMainLayer''',
'''TFRobertaModel''',
'''TFRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''FlaxRobertaForCausalLM''',
'''FlaxRobertaForMaskedLM''',
'''FlaxRobertaForMultipleChoice''',
'''FlaxRobertaForQuestionAnswering''',
'''FlaxRobertaForSequenceClassification''',
'''FlaxRobertaForTokenClassification''',
'''FlaxRobertaModel''',
'''FlaxRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
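# Usage note (hedged): with the lazy structure above, importing RobertaConfig
# resolves through _LazyModule and pulls in only configuration_roberta; the
# torch/tf/flax modeling files are imported on first attribute access, so an
# environment without those backends can still import the package itself.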
| 24 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowercase__ ( A, unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = DiTPipeline
_UpperCAmelCase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
_UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
'''latents''',
'''num_images_per_prompt''',
'''callback''',
'''callback_steps''',
}
_UpperCAmelCase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
_UpperCAmelCase = False
def lowerCamelCase_ ( self ) -> str:
torch.manual_seed(0 )
_UpperCAmelCase = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=snake_case , activation_fn='gelu-approximate' , num_embeds_ada_norm=1000 , norm_type='ada_norm_zero' , norm_elementwise_affine=snake_case , )
_UpperCAmelCase = AutoencoderKL()
_UpperCAmelCase = DDIMScheduler()
_UpperCAmelCase = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
return components
def lowerCamelCase_ ( self , snake_case , snake_case=0 ) -> Optional[Any]:
if str(snake_case ).startswith('mps' ):
_UpperCAmelCase = torch.manual_seed(snake_case )
else:
_UpperCAmelCase = torch.Generator(device=snake_case ).manual_seed(snake_case )
_UpperCAmelCase = {
'class_labels': [1],
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def lowerCamelCase_ ( self ) -> List[Any]:
_UpperCAmelCase = 'cpu'
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = self.pipeline_class(**snake_case )
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
_UpperCAmelCase = self.get_dummy_inputs(snake_case )
_UpperCAmelCase = pipe(**snake_case ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_UpperCAmelCase = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
_UpperCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(snake_case , 1E-3 )
def lowerCamelCase_ ( self ) -> Any:
self._test_inference_batch_single_identical(relax_max_difference=snake_case , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def lowerCamelCase_ ( self ) -> Optional[int]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase_ ( self ) -> int:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
pipe.to('cuda' )
_UpperCAmelCase = ['vase', 'umbrella', 'white shark', 'white wolf']
_UpperCAmelCase = pipe.get_label_ids(snake_case )
_UpperCAmelCase = pipe(snake_case , generator=snake_case , num_inference_steps=40 , output_type='np' ).images
for word, image in zip(snake_case , snake_case ):
_UpperCAmelCase = load_numpy(
f'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy' )
assert np.abs((expected_image - image).max() ) < 1E-2
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
_UpperCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('cuda' )
_UpperCAmelCase = ['vase', 'umbrella']
_UpperCAmelCase = pipe.get_label_ids(snake_case )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = pipe(snake_case , generator=snake_case , num_inference_steps=25 , output_type='np' ).images
for word, image in zip(snake_case , snake_case ):
_UpperCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
f'/dit/{word}_512.npy' )
assert np.abs((expected_image - image).max() ) < 1E-1
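# Hedged usage sketch of the lookup used above: get_label_ids maps ImageNet-1k
# class names to the integer class ids DiT's class-conditional transformer
# expects, e.g. pipe.get_label_ids(['white shark']) returns a one-element list
# (the concrete id value depends on the pipeline's label table).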
| 721 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
lowercase = logging.get_logger(__name__)
class lowercase__ ( A ):
'''simple docstring'''
def __init__( self , *snake_case , **snake_case ) -> None:
warnings.warn(
'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use YolosImageProcessor instead.' , snake_case , )
super().__init__(*snake_case , **snake_case )
| 24 | 0 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase : int = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( snake_case_ : Tuple ):
print("Loading config file..." )
def flatten_yaml_as_dict(snake_case_ : str , snake_case_ : Dict="" , snake_case_ : List[Any]="." ):
snake_case__ : Tuple = []
for k, v in d.items():
snake_case__ : int = parent_key + sep + k if parent_key else k
if isinstance(snake_case_ , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(snake_case_ , snake_case_ , sep=snake_case_ ).items() )
else:
items.append((new_key, v) )
return dict(snake_case_ )
snake_case__ : int = argparse.Namespace()
with open(snake_case_ , "r" ) as yaml_file:
try:
snake_case__ : Dict = yaml.load(snake_case_ , Loader=yaml.FullLoader )
snake_case__ : Any = flatten_yaml_as_dict(snake_case_ )
for k, v in flat_cfg.items():
setattr(snake_case_ , snake_case_ , snake_case_ )
except yaml.YAMLError as exc:
logger.error("Error while loading config file: {}. Error message: {}".format(snake_case_ , str(snake_case_ ) ) )
return config
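# Worked example of the flattening above (hypothetical input): a nested mapping
# {'model': {'classification': {'name': 'mobilevit_v2'}}} becomes the flat dict
# {'model.classification.name': 'mobilevit_v2'}; each dotted key is then set as
# a literal attribute name on the Namespace, which is why the getattr calls
# below look up dotted strings.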
def SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : str ):
snake_case__ : List[Any] = MobileViTVaConfig()
snake_case__ : Optional[int] = False
# dataset
if task_name.startswith("imagenet1k_" ):
snake_case__ : List[Any] = 1000
if int(task_name.strip().split("_" )[-1] ) == 384:
snake_case__ : Any = 384
else:
snake_case__ : int = 256
snake_case__ : Union[str, Any] = "imagenet-1k-id2label.json"
elif task_name.startswith("imagenet21k_to_1k_" ):
snake_case__ : str = 21000
if int(task_name.strip().split("_" )[-1] ) == 384:
snake_case__ : Union[str, Any] = 384
else:
snake_case__ : str = 256
snake_case__ : List[Any] = "imagenet-22k-id2label.json"
elif task_name.startswith("ade20k_" ):
snake_case__ : Tuple = 151
snake_case__ : Union[str, Any] = 512
snake_case__ : Optional[int] = "ade20k-id2label.json"
snake_case__ : int = True
elif task_name.startswith("voc_" ):
snake_case__ : List[Any] = 21
snake_case__ : Union[str, Any] = 512
snake_case__ : int = "pascal-voc-id2label.json"
snake_case__ : Dict = True
# orig_config
snake_case__ : Optional[Any] = load_orig_config_file(snake_case_ )
assert getattr(snake_case_ , "model.classification.name" , -1 ) == "mobilevit_v2", "Invalid model"
snake_case__ : int = getattr(snake_case_ , "model.classification.mitv2.width_multiplier" , 1.0 )
assert (
getattr(snake_case_ , "model.classification.mitv2.attn_norm_layer" , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
snake_case__ : Union[str, Any] = getattr(snake_case_ , "model.classification.activation.name" , "swish" )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
snake_case__ : Any = getattr(snake_case_ , "model.segmentation.output_stride" , 16 )
if "_deeplabv3" in task_name:
snake_case__ : List[str] = getattr(snake_case_ , "model.segmentation.deeplabv3.aspp_rates" , [12, 24, 36] )
snake_case__ : List[Any] = getattr(snake_case_ , "model.segmentation.deeplabv3.aspp_out_channels" , 512 )
snake_case__ : Union[str, Any] = getattr(snake_case_ , "model.segmentation.deeplabv3.aspp_dropout" , 0.1 )
# id2label
snake_case__ : Optional[int] = "huggingface/label-files"
snake_case__ : Any = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type="dataset" ) , "r" ) )
snake_case__ : Tuple = {int(snake_case_ ): v for k, v in idalabel.items()}
snake_case__ : int = idalabel
snake_case__ : str = {v: k for k, v in idalabel.items()}
return config
def SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Optional[Any] , snake_case_ : List[Any] ):
snake_case__ : int = dct.pop(snake_case_ )
snake_case__ : List[str] = val
def SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : str=False ):
if base_model:
snake_case__ : Optional[int] = ""
else:
snake_case__ : Optional[int] = "mobilevitv2."
snake_case__ : List[str] = []
for k in state_dict.keys():
if k[:8] == "encoder.":
snake_case__ : Tuple = k[8:]
else:
snake_case__ : Union[str, Any] = k
if ".block." in k:
snake_case__ : List[Any] = k_new.replace(".block." , "." )
if ".conv." in k:
snake_case__ : Optional[int] = k_new.replace(".conv." , ".convolution." )
if ".norm." in k:
snake_case__ : str = k_new.replace(".norm." , ".normalization." )
if "conv_1." in k:
snake_case__ : int = k_new.replace("conv_1." , F'''{model_prefix}conv_stem.''' )
for i in [1, 2]:
if F'''layer_{i}.''' in k:
snake_case__ : int = k_new.replace(F'''layer_{i}.''' , F'''{model_prefix}encoder.layer.{i-1}.layer.''' )
if ".exp_1x1." in k:
snake_case__ : List[Any] = k_new.replace(".exp_1x1." , ".expand_1x1." )
if ".red_1x1." in k:
snake_case__ : Any = k_new.replace(".red_1x1." , ".reduce_1x1." )
for i in [3, 4, 5]:
if F'''layer_{i}.0.''' in k:
snake_case__ : int = k_new.replace(F'''layer_{i}.0.''' , F'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' )
if F'''layer_{i}.1.local_rep.0.''' in k:
snake_case__ : Optional[Any] = k_new.replace(F'''layer_{i}.1.local_rep.0.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' )
if F'''layer_{i}.1.local_rep.1.''' in k:
snake_case__ : Dict = k_new.replace(F'''layer_{i}.1.local_rep.1.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' )
for i in [3, 4, 5]:
if i == 3:
snake_case__ : int = [0, 1]
elif i == 4:
snake_case__ : List[str] = [0, 1, 2, 3]
elif i == 5:
snake_case__ : Optional[Any] = [0, 1, 2]
for j in j_in:
if F'''layer_{i}.1.global_rep.{j}.''' in k:
snake_case__ : Any = k_new.replace(
F'''layer_{i}.1.global_rep.{j}.''' , F'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' )
if F'''layer_{i}.1.global_rep.{j+1}.''' in k:
snake_case__ : str = k_new.replace(
F'''layer_{i}.1.global_rep.{j+1}.''' , F'''{model_prefix}encoder.layer.{i-1}.layernorm.''' )
if F'''layer_{i}.1.conv_proj.''' in k:
snake_case__ : List[str] = k_new.replace(F'''layer_{i}.1.conv_proj.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' )
if "pre_norm_attn.0." in k:
snake_case__ : Any = k_new.replace("pre_norm_attn.0." , "layernorm_before." )
if "pre_norm_attn.1." in k:
snake_case__ : str = k_new.replace("pre_norm_attn.1." , "attention." )
if "pre_norm_ffn.0." in k:
snake_case__ : Union[str, Any] = k_new.replace("pre_norm_ffn.0." , "layernorm_after." )
if "pre_norm_ffn.1." in k:
snake_case__ : Union[str, Any] = k_new.replace("pre_norm_ffn.1." , "ffn.conv1." )
if "pre_norm_ffn.3." in k:
snake_case__ : Dict = k_new.replace("pre_norm_ffn.3." , "ffn.conv2." )
if "classifier.1." in k:
snake_case__ : str = k_new.replace("classifier.1." , "classifier." )
if "seg_head." in k:
snake_case__ : Dict = k_new.replace("seg_head." , "segmentation_head." )
if ".aspp_layer." in k:
snake_case__ : Dict = k_new.replace(".aspp_layer." , "." )
if ".aspp_pool." in k:
snake_case__ : int = k_new.replace(".aspp_pool." , "." )
rename_keys.append((k, k_new) )
return rename_keys
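# Example of the mapping above (hypothetical checkpoint key): with base_model
# False, 'layer_3.1.conv_proj.block.norm.weight' is rewritten step by step
# (.block. dropped, .norm. -> .normalization., layer_3.1.conv_proj. -> the
# prefixed projection) into
# 'mobilevitv2.encoder.layer.2.conv_projection.normalization.weight'.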
def SCREAMING_SNAKE_CASE ( snake_case_ : Tuple ):
snake_case__ : Optional[int] = []
for k in state_dict.keys():
if k.startswith("seg_head.aux_head." ):
keys_to_ignore.append(snake_case_ )
for k in keys_to_ignore:
state_dict.pop(snake_case_ , snake_case_ )
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : str = "http://images.cocodataset.org/val2017/000000039769.jpg"
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
snake_case__ : Optional[Any] = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw )
return im
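# The COCO image above (000000039769.jpg, the familiar two-cats photo used
# across transformers conversion scripts) only provides a deterministic input
# for the logit check below; any RGB image would exercise the same code path.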
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( snake_case_ : Dict , snake_case_ : int , snake_case_ : Tuple , snake_case_ : Dict ):
snake_case__ : List[Any] = get_mobilevitva_config(snake_case_ , snake_case_ )
# load original state_dict
snake_case__ : List[Any] = torch.load(snake_case_ , map_location="cpu" )
# load huggingface model
if task_name.startswith("ade20k_" ) or task_name.startswith("voc_" ):
snake_case__ : Union[str, Any] = MobileViTVaForSemanticSegmentation(snake_case_ ).eval()
snake_case__ : List[str] = False
else:
snake_case__ : int = MobileViTVaForImageClassification(snake_case_ ).eval()
snake_case__ : Tuple = False
# remove and rename some keys of load the original model
snake_case__ : List[str] = checkpoint
remove_unused_keys(snake_case_ )
snake_case__ : List[str] = create_rename_keys(snake_case_ , base_model=snake_case_ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(snake_case_ , snake_case_ , snake_case_ )
# load modified state_dict
model.load_state_dict(snake_case_ )
# Check outputs on an image, prepared by MobileViTImageProcessor
snake_case__ : Optional[Any] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
snake_case__ : List[str] = image_processor(images=prepare_img() , return_tensors="pt" )
snake_case__ : str = model(**snake_case_ )
# verify classification model
if task_name.startswith("imagenet" ):
snake_case__ : List[Any] = outputs.logits
snake_case__ : int = logits.argmax(-1 ).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
if task_name.startswith("imagenet1k_256" ) and config.width_multiplier == 1.0:
# expected_logits for base variant
snake_case__ : Union[str, Any] = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] )
assert torch.allclose(logits[0, :3] , snake_case_ , atol=1E-4 )
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
print(F'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case_ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(snake_case_ )
if __name__ == "__main__":
__lowerCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""",
default="""imagenet1k_256""",
type=str,
help=(
"""Name of the task for which the MobileViTV2 model you'd like to convert is trained on . """
"""
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
"""
),
choices=[
"""imagenet1k_256""",
"""imagenet1k_384""",
"""imagenet21k_to_1k_256""",
"""imagenet21k_to_1k_384""",
"""ade20k_deeplabv3""",
"""voc_deeplabv3""",
],
)
parser.add_argument(
"""--orig_checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument("""--orig_config_path""", required=True, type=str, help="""Path to the original config file.""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
__lowerCamelCase : Optional[Any] = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
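# Hedged CLI sketch (script and file names are illustrative placeholders):
# python convert_mlcvnets_to_pytorch.py --task imagenet1k_256 \
#     --orig_checkpoint_path mobilevitv2-1.0.pt \
#     --orig_config_path mobilevitv2-1.0.yaml \
#     --pytorch_dump_folder_path ./mobilevitv2-1.0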
| 25 |
from __future__ import annotations
def prime_sieve(limit: int ) -> list[int]:
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    # cross off odd composites; even numbers > 2 are never appended below
    for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 , limit , 2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
def solution(ceiling: int = 1000000 ) -> int:
    primes = prime_sieve(ceiling )
    length = 0
    largest = 0
    for i in range(len(primes ) ):
        for j in range(i + length , len(primes ) ):
            sol = sum(primes[i:j] )
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
print(f"{solution() = }")
| 25 | 1 |