| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 81 to 54k) | int64 (0 to 721) | string (lengths 91 to 41.9k) | int64 (0 to 699) | int64 (0 to 1) |
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.plbart.modeling_plbart import shift_tokens_right

EN_CODE = 50003
PYTHON_CODE = 50002


@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_base_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # pieces outside the vocab ("9" and "é") come back as "<unk>"
        # fmt: off
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
        # fmt: on

        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 4, end)]
        self.assertListEqual(language_tokens, ["__java__", "__python__", "__en_XX__", "<mask>"])

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True),
            code,
        )

    def test_full_multi_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="multi", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
        # fmt: on

        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 7, end)]
        self.assertListEqual(
            language_tokens, ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"]
        )

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True),
            code,
        )


@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartPythonEnIntegrationTest(unittest.TestCase):
    checkpoint_name = "uclanlp/plbart-python-en_XX"
    src_text = [
        "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
        "def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
    ]
    tgt_text = [
        "Returns the maximum value of a b c.",
        "Sums the values of a b c.",
    ]
    # fmt: off
    expected_src_tokens = [134, 5452, 33460, 33441, 33463, 33465, 33463, 33449, 988, 20, 33456, 19, 33456, 771, 39, 4258, 889, 3318, 33441, 33463, 33465, 33463, 33449, 2471, 2, PYTHON_CODE]
    # fmt: on

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name, language_codes="base", src_lang="python", tgt_lang="en_XX"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"], 50001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"], 50002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"], 50003)

    def test_python_en_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_python_en_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(PYTHON_CODE, self.tokenizer.all_special_ids)
        generated_ids = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_english = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_english)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_python_en_tokenizer_truncation(self):
        src_text = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20]
        self.assertIsInstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], PYTHON_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"]), [50004, 50001])

    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = PLBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist(), [2, PYTHON_CODE])
        self.assertEqual(batch.decoder_input_ids[1][0], EN_CODE)
        self.assertEqual(batch.decoder_input_ids[1][-1], 2)
        self.assertEqual(batch.labels[1][-2:].tolist(), [2, EN_CODE])

    @require_torch
    def test_python_en_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 26), batch.input_ids.shape)
        self.assertEqual((2, 26), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, PYTHON_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="java"
        )
        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[150, 242, 2, 50003]],
                "attention_mask": [[1, 1, 1, 1]],
                # java
                "forced_bos_token_id": 50001,
            },
        )
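For context, a short, hedged usage sketch of the tokenizer these tests exercise; the checkpoint and language codes come from the constants above, and the input string is made up:

```python
from transformers import PLBartTokenizer

tokenizer = PLBartTokenizer.from_pretrained(
    "uclanlp/plbart-python-en_XX", src_lang="python", tgt_lang="en_XX"
)
batch = tokenizer("def add(a,b):NEW_LINE_INDENTreturn a+b", return_tensors="pt")
# Source sequences are suffixed with [eos, src_lang_code], i.e. [..., 2, 50002].
```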
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/config.json",
    # See all XGLM models at https://huggingface.co/models?filter=xglm
}


class XGLMConfig(PretrainedConfig):
    model_type = "xglm"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=256008,
        max_position_embeddings=2048,
        d_model=1024,
        ffn_dim=4096,
        num_layers=24,
        attention_heads=16,
        activation_function="gelu",
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        layerdrop=0.0,
        init_std=0.02,
        scale_embedding=True,
        use_cache=True,
        decoder_start_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
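A minimal sketch of how this config is typically used; the defaults above correspond to the facebook/xglm-564M checkpoint referenced in the archive map:

```python
from transformers import XGLMConfig, XGLMModel

config = XGLMConfig()        # defaults match facebook/xglm-564M
print(config.hidden_size)    # 1024, resolved to d_model through attribute_map
model = XGLMModel(config)    # randomly initialised model of that shape
```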
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)


DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)


def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )


def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
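A quick usage sketch of the two public helpers defined above; the example data is made up and the interleaved order shown in the comment is illustrative:

```python
from datasets import Dataset, concatenate_datasets, interleave_datasets

d1 = Dataset.from_dict({"x": [0, 1, 2]})
d2 = Dataset.from_dict({"x": [10, 11, 12]})

mixed = interleave_datasets([d1, d2], probabilities=[0.5, 0.5], seed=42)
mixed["x"]  # e.g. [10, 0, 11, 1, 2] -- stops once one dataset is exhausted
concatenate_datasets([d1, d2])["x"]  # [0, 1, 2, 10, 11, 12]
```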
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine."


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
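For illustration, the parser above can also be driven programmatically; this is a hedged sketch and the config path is made up:

```python
parser = config_command_parser()
args = parser.parse_args(["--config_file", "my_config.yaml"])  # hypothetical path
config_command(args)  # runs the interactive prompts, then writes my_config.yaml
```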
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    """Output class for the scheduler's step function."""

    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    """Stochastic sampling from Karras et al. (2022)."""

    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ) -> Tuple[torch.FloatTensor, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
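As a rough illustration, here is a hedged sketch of the sampling loop this scheduler is designed for. It loosely mirrors diffusers' KarrasVePipeline; the checkpoint is a placeholder assumption, and the second-order `step_correct` pass (applied whenever `sigma_prev != 0` in the full pipeline) is omitted for brevity:

```python
import torch
from diffusers import KarrasVeScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")  # placeholder checkpoint
scheduler = KarrasVeScheduler()
scheduler.set_timesteps(50)

size = unet.config.sample_size
sample = torch.randn(1, 3, size, size) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    sigma = scheduler.schedule[t]
    sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
    # 1. stochastically raise the noise level, 2. predict, 3. take an Euler step
    sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
    model_output = (sigma_hat / 2) * unet((sample_hat + 1) / 2, sigma_hat / 2).sample
    sample = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat).prev_sample
```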
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")


def find_backend(line):
    """Find one (or multiple) backend in a code line of an init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)


def parse_init(init_file):
    """
    Read an init file and parse, per backend, the objects defined in the _import_structure and in the
    TYPE_CHECKING block. Returns None for a traditional (non-lazy) init.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects


def analyze_results(import_dict_objects, type_hint_objects):
    """Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors


def check_all_inits():
    """Check all inits in the repo define the same objects in both halves."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


def get_transformers_submodules():
    """Get the list of Transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]


def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]

    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
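For illustration, the backend extraction above behaves like this on hypothetical init lines (my examples, not taken from the script):

```python
assert find_backend("    if not is_torch_available():") == "torch"
assert find_backend("        import torch") is None  # not a backend guard
# Several is_xxx_available() calls on one guard line are joined with "_and_".
```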
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path

import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized

from transformers import AutoProcessor
from transformers.models.wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available

from ..wav2vec2.test_feature_extraction_wav2vec2 import floats_list


if is_pyctcdecode_available():
    from huggingface_hub import snapshot_download
    from pyctcdecode import BeamSearchDecoderCTC

    from transformers.models.wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
    from transformers.models.wav2vec2_with_lm.processing_wav2vec2_with_lm import Wav2Vec2DecoderWithLMOutput

if is_torch_available():
    from transformers import Wav2Vec2ForCTC


@require_pyctcdecode
class Wav2Vec2ProcessorWithLMTest(unittest.TestCase):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"

    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return Wav2Vec2CTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return Wav2Vec2FeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        processor.save_pretrained(self.tmpdirname)
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(self.tmpdirname)

        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, Wav2Vec2CTCTokenizer)

        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, Wav2Vec2FeatureExtractor)

        # decoder
        self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels)
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set,
            decoder.model_container[decoder._model_key]._unigram_set,
        )
        self.assertIsInstance(processor.decoder, BeamSearchDecoderCTC)

    def test_save_load_pretrained_additional_features(self):
        processor = Wav2Vec2ProcessorWithLM(
            tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
        )
        processor.save_pretrained(self.tmpdirname)

        # make sure that error is thrown when decoder alphabet doesn't match
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(
            self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3
        )

        # decoder
        self.assertEqual(processor.language_model.alpha, 5.0)
        self.assertEqual(processor.language_model.beta, 3.0)
        self.assertEqual(processor.language_model.score_boundary, -7.0)
        self.assertEqual(processor.language_model.unk_score_offset, 3)

    def test_load_decoder_tokenizer_missing_tokens(self):
        tokenizer = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(["xx"])
        with self.assertRaisesRegex(ValueError, "include"):
            Wav2Vec2ProcessorWithLM(
                tokenizer=tokenizer, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
            )

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        raw_speech = floats_list((3, 1000))
        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)

    def test_decoder(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits(shape=(10, 16), seed=13)
        decoded_processor = processor.decode(logits)
        decoded_decoder = decoder.decode_beams(logits)[0]

        self.assertEqual(decoded_decoder[0], decoded_processor.text)
        self.assertEqual("</s> <s> </s>", decoded_processor.text)
        self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score)
        self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score)

    @parameterized.expand([[None], ["fork"], ["spawn"]])
    def test_decoder_batch(self, pool_context):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        #       otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits)
        else:
            with get_context(pool_context).Pool() as pool:
                decoded_processor = processor.batch_decode(logits, pool)

        logits_list = list(logits)

        with get_context("fork").Pool() as p:
            decoded_beams = decoder.decode_beams_batch(p, logits_list)

        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0])
            logit_scores_decoder.append(beams[0][-2])
            lm_scores_decoder.append(beams[0][-1])

        self.assertListEqual(texts_decoder, decoded_processor.text)
        self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"], decoded_processor.text)
        self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score)
        self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score)

    def test_decoder_with_params(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        beam_width = 15
        beam_prune_logp = -20.0
        token_min_logp = -4.0

        decoded_processor_out = processor.batch_decode(
            logits,
            beam_width=beam_width,
            beam_prune_logp=beam_prune_logp,
            token_min_logp=token_min_logp,
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)
        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool,
                logits_list,
                beam_width=beam_width,
                beam_prune_logp=beam_prune_logp,
                token_min_logp=token_min_logp,
            )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]
        logit_scores = [d[0][2] for d in decoded_decoder_out]
        lm_scores = [d[0][3] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"], decoded_processor)
        self.assertTrue(np.array_equal(logit_scores, decoded_processor_out.logit_score))
        self.assertTrue(np.allclose([-20.054, -18.447], logit_scores, atol=1e-3))
        self.assertTrue(np.array_equal(lm_scores, decoded_processor_out.lm_score))
        self.assertTrue(np.allclose([-15.554, -13.9474], lm_scores, atol=1e-3))

    def test_decoder_with_params_of_lm(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        alpha = 2.0
        beta = 5.0
        unk_score_offset = -20.0
        lm_score_boundary = True

        decoded_processor_out = processor.batch_decode(
            logits,
            alpha=alpha,
            beta=beta,
            unk_score_offset=unk_score_offset,
            lm_score_boundary=lm_score_boundary,
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)
        decoder.reset_params(
            alpha=alpha,
            beta=beta,
            unk_score_offset=unk_score_offset,
            lm_score_boundary=lm_score_boundary,
        )
        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(pool, logits_list)

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"], decoded_processor)

        lm_model = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha, 2.0)
        self.assertEqual(lm_model.beta, 5.0)
        self.assertEqual(lm_model.unk_score_offset, -20.0)
        self.assertEqual(lm_model.score_boundary, True)

    def test_decoder_download_ignores_files(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        downloaded_decoder_files = os.listdir(path_to_cached_dir)
        expected_decoder_files = ["alphabet.json", "language_model"]

        downloaded_decoder_files.sort()
        expected_decoder_files.sort()

        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(downloaded_decoder_files, expected_decoder_files)

    def test_decoder_local_files(self):
        local_dir = snapshot_download("hf-internal-testing/processor_with_lm")
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(local_dir)

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        local_decoder_files = os.listdir(local_dir)
        expected_decoder_files = os.listdir(path_to_cached_dir)

        local_decoder_files.sort()
        expected_decoder_files.sort()

        # test that both decoder from hub and local files in cache are the same
        self.assertListEqual(local_decoder_files, expected_decoder_files)

    def test_processor_from_auto_processor(self):
        processor_wav2vec2 = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        processor_auto = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm")

        raw_speech = floats_list((3, 1000))

        input_wav2vec2 = processor_wav2vec2(raw_speech, return_tensors="np")
        input_auto = processor_auto(raw_speech, return_tensors="np")

        for key in input_wav2vec2.keys():
            self.assertAlmostEqual(input_wav2vec2[key].sum(), input_auto[key].sum(), delta=1e-2)

        logits = self._get_dummy_logits()

        decoded_wav2vec2 = processor_wav2vec2.batch_decode(logits)
        decoded_auto = processor_auto.batch_decode(logits)

        self.assertListEqual(decoded_wav2vec2.text, decoded_auto.text)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        self.assertListEqual(
            processor.model_input_names,
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )

    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list

    def test_offsets_integration_fast(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()[0]
        outputs = processor.decode(logits, output_word_offsets=True)

        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput))

        self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"], "word")), outputs.text)
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "end_offset"), [1, 3, 5])

    def test_offsets_integration_fast_batch(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()
        outputs = processor.batch_decode(logits, output_word_offsets=True)

        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput))

        self.assertListEqual(
            [" ".join(self.get_from_offsets(o, "word")) for o in outputs["word_offsets"]], outputs.text
        )
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "end_offset"), [1, 3, 5])

    @slow
    @require_torch
    @require_torchaudio
    def test_word_time_stamp_integration(self):
        import torch

        ds = load_dataset("common_voice", "en", split="train", streaming=True)
        ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16000))
        ds_iter = iter(ds)
        sample = next(ds_iter)

        processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
        model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")

        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        input_values = processor(sample["audio"]["array"], return_tensors="pt").input_values

        with torch.no_grad():
            logits = model(input_values).logits.cpu().numpy()

        output = processor.decode(logits[0], output_word_offsets=True)

        time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        word_time_stamps = [
            {
                "start_time": d["start_offset"] * time_offset,
                "end_time": d["end_offset"] * time_offset,
                "word": d["word"],
            }
            for d in output["word_offsets"]
        ]

        EXPECTED_TEXT = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"

        # output words
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), EXPECTED_TEXT)
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), output.text)

        # output times
        start_times = torch.tensor(self.get_from_offsets(word_time_stamps, "start_time"))
        end_times = torch.tensor(self.get_from_offsets(word_time_stamps, "end_time"))

        # fmt: off
        expected_start_tensor = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599])
        expected_end_tensor = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94])
        # fmt: on

        self.assertTrue(torch.allclose(start_times, expected_start_tensor, atol=0.01))
        self.assertTrue(torch.allclose(end_times, expected_end_tensor, atol=0.01))
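A minimal, hedged sketch of the processor API these tests exercise; the checkpoint name is taken from the tests, and `logits` stands in for a real model output:

```python
from transformers import Wav2Vec2ProcessorWithLM

processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
# Given `logits` of shape (batch, time, vocab) as a numpy array from a CTC model:
# transcriptions = processor.batch_decode(logits).text
```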
import logging
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import arg_to_scheduler
from transformers import TrainingArguments


logger = logging.getLogger(__name__)


@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "Whether to use adafactor."})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear",
        metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"},
    )
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
_ffmpeg_warned, _librosa_warned, _audioread_warned = False, False, False  # warn-once flags (names assumed)
@dataclass
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
lowercase : Optional[int] = None
lowercase : bool = True
lowercase : bool = True
lowercase : Optional[str] = None
# Automatically constructed
lowercase : ClassVar[str] = "dict"
lowercase : ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
lowercase : str = field(default="Audio" , init=lowerCAmelCase__ , repr=lowerCAmelCase__ )
def __call__( self : str ) -> Optional[int]:
return self.pa_type
def SCREAMING_SNAKE_CASE_ ( self : Tuple , SCREAMING_SNAKE_CASE__ : Any ) -> dict:
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError('To support encoding audio data, please install \'soundfile\'.' ) from err
if isinstance(_lowerCamelCase , _lowerCamelCase ):
return {"bytes": None, "path": value}
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
A : Tuple =BytesIO()
sf.write(_lowerCamelCase , value['array'] , value['sampling_rate'] , format='wav' )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get('path' ) is not None and os.path.isfile(value['path'] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith('pcm' ):
# "PCM" only has raw audio bytes
if value.get('sampling_rate' ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError('To use PCM files, please specify a \'sampling_rate\' in Audio object' )
if value.get('bytes' ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
A : List[str] =np.frombuffer(value['bytes'] , dtype=np.intaa ).astype(np.floataa ) / 3_27_67
else:
A : Dict =np.memmap(value['path'] , dtype='h' , mode='r' ).astype(np.floataa ) / 3_27_67
A : Any =BytesIO(bytes() )
sf.write(_lowerCamelCase , _lowerCamelCase , value['sampling_rate'] , format='wav' )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get('path' )}
elif value.get('bytes' ) is not None or value.get('path' ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get('bytes' ), "path": value.get('path' )}
else:
raise ValueError(
f'An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] = None ) -> dict:
if not self.decode:
raise RuntimeError('Decoding is disabled for this feature. Please use Audio(decode=True) instead.' )
A , A : str =(value['path'], BytesIO(value['bytes'] )) if value['bytes'] is not None else (value['path'], None)
if path is None and file is None:
raise ValueError(f'An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError('To support decoding audio files, please install \'librosa\' and \'soundfile\'.' ) from err
A : List[Any] =xsplitext(_lowerCamelCase )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
'Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, '
'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ' )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
'Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, '
'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ' )
if file is None:
A : Any =token_per_repo_id or {}
A : Dict =path.split('::' )[-1]
try:
A : List[str] =string_to_dict(_lowerCamelCase , config.HUB_DATASETS_URL )['repo_id']
A : Optional[int] =token_per_repo_id[repo_id]
except (ValueError, KeyError):
A : str =None
with xopen(_lowerCamelCase , 'rb' , use_auth_token=_lowerCamelCase ) as f:
A , A : Optional[int] =sf.read(_lowerCamelCase )
else:
A , A : Union[str, Any] =sf.read(_lowerCamelCase )
A : Union[str, Any] =array.T
if self.mono:
A : Optional[int] =librosa.to_mono(_lowerCamelCase )
if self.sampling_rate and self.sampling_rate != sampling_rate:
A : Tuple =librosa.resample(_lowerCamelCase , orig_sr=_lowerCamelCase , target_sr=self.sampling_rate )
A : Dict =self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
if self.decode:
raise ValueError('Cannot flatten a decoded Audio feature.' )
return {
"bytes": Value('binary' ),
"path": Value('string' ),
}
def SCREAMING_SNAKE_CASE_ ( self : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> pa.StructArray:
if pa.types.is_string(storage.type ):
A : Optional[Any] =pa.array([None] * len(_lowerCamelCase ) , type=pa.binary() )
A : List[Any] =pa.StructArray.from_arrays([bytes_array, storage] , ['bytes', 'path'] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
A : Tuple =pa.array([None] * len(_lowerCamelCase ) , type=pa.string() )
A : List[Any] =pa.StructArray.from_arrays([storage, path_array] , ['bytes', 'path'] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices('array' ):
A : Tuple =pa.array([Audio().encode_example(_lowerCamelCase ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('bytes' ) >= 0:
A : Any =storage.field('bytes' )
else:
A : int =pa.array([None] * len(_lowerCamelCase ) , type=pa.binary() )
if storage.type.get_field_index('path' ) >= 0:
A : Union[str, Any] =storage.field('path' )
else:
A : int =pa.array([None] * len(_lowerCamelCase ) , type=pa.string() )
A : List[Any] =pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=storage.is_null() )
return array_cast(_lowerCamelCase , self.pa_type )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , SCREAMING_SNAKE_CASE__ : str ) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(SCREAMING_SNAKE_CASE__ : Optional[Any] ):
with xopen(_lowerCamelCase , 'rb' ) as f:
A : Any =f.read()
return bytes_
A : Any =pa.array(
[
(path_to_bytes(x['path'] ) if x['bytes'] is None else x['bytes']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
A : Optional[Any] =pa.array(
[os.path.basename(_lowerCamelCase ) if path is not None else None for path in storage.field('path' ).to_pylist()] , type=pa.string() , )
A : Union[str, Any] =pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null() )
return array_cast(_lowerCamelCase , self.pa_type )
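# Usage sketch for the feature above (a hedged example, not library code): it
# exercises the public `datasets` API that this class backs; "sample.wav" is a
# hypothetical 16 kHz mono file. Guarded so importing the module stays side-effect free.
if __name__ == "__main__":
    from datasets import Audio, Dataset

    ds = Dataset.from_dict({"audio": ["sample.wav"]}).cast_column(
        "audio", Audio(sampling_rate=16_000, mono=True)
    )
    decoded = ds[0]["audio"]  # decode_example() output: path / array / sampling_rate
    print(decoded["sampling_rate"], len(decoded["array"]))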
| 712 |
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
_lowercase : int =2
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
def __init__( self : List[Any] , *, # begin keyword-only arguments
SCREAMING_SNAKE_CASE__ : List[Any]="<s>" , SCREAMING_SNAKE_CASE__ : Optional[int]="<pad>" , SCREAMING_SNAKE_CASE__ : List[str]="</s>" , SCREAMING_SNAKE_CASE__ : Optional[Any]="<unk>" , SCREAMING_SNAKE_CASE__ : int=None , ) -> List[Any]:
A , A , A , A : Optional[Any] =bos, unk, pad, eos
A : Dict =[]
A : Union[str, Any] =[]
A : Any ={}
A : int =self.add_symbol(SCREAMING_SNAKE_CASE__ )
A : Any =self.add_symbol(SCREAMING_SNAKE_CASE__ )
A : List[Any] =self.add_symbol(SCREAMING_SNAKE_CASE__ )
A : List[str] =self.add_symbol(SCREAMING_SNAKE_CASE__ )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(SCREAMING_SNAKE_CASE__ )
A : List[str] =len(self.symbols )
def __eq__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> str:
return self.indices == other.indices
def __getitem__( self : int , SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[Any]:
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self : List[Any] ) -> Union[str, Any]:
return len(self.symbols )
def __contains__( self : Dict , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Tuple:
return sym in self.indices
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , SCREAMING_SNAKE_CASE__ : int ) -> Any:
A : Union[str, Any] =cls()
d.add_from_file(SCREAMING_SNAKE_CASE__ )
return d
def SCREAMING_SNAKE_CASE_ ( self : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any=1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=False ) -> Any:
if word in self.indices and not overwrite:
A : int =self.indices[word]
A : Union[str, Any] =self.count[idx] + n
return idx
else:
A : Tuple =len(self.symbols )
A : str =idx
self.symbols.append(SCREAMING_SNAKE_CASE__ )
self.count.append(SCREAMING_SNAKE_CASE__ )
return idx
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[Any]:
return 0
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : List[str] ) -> Optional[Any]:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
try:
with open(SCREAMING_SNAKE_CASE__ , 'r' , encoding='utf-8' ) as fd:
self.add_from_file(SCREAMING_SNAKE_CASE__ )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(SCREAMING_SNAKE_CASE__ ) )
return
A : str =f.readlines()
A : int =self._load_meta(SCREAMING_SNAKE_CASE__ )
for line in lines[indices_start_line:]:
try:
A , A : Optional[int] =line.rstrip().rsplit(' ' , 1 )
if field == "#fairseq:overwrite":
A : int =True
A , A : Optional[Any] =line.rsplit(' ' , 1 )
else:
A : Any =False
A : Tuple =int(SCREAMING_SNAKE_CASE__ )
A : Optional[int] =line
if word in self and not overwrite:
raise RuntimeError(
'Duplicate word found when loading Dictionary: \'{}\'. '
'Duplicate words can overwrite earlier ones by adding the '
'#fairseq:overwrite flag at the end of the corresponding row '
'in the dictionary file. If using the Camembert model, please '
'download an updated copy of the model file.'.format(SCREAMING_SNAKE_CASE__ ) )
self.add_symbol(SCREAMING_SNAKE_CASE__ , n=SCREAMING_SNAKE_CASE__ , overwrite=SCREAMING_SNAKE_CASE__ )
except ValueError:
raise ValueError('Incorrect dictionary format, expected \'<token> <cnt> [flags]\'' )
def A__ ( lowercase: Union[str, Any] ) -> str:
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
A : int =dict((re.sub(r'@@$', '', lowercase ), v) if k.endswith('@@' ) else (re.sub(r'$', '</w>', lowercase ), v) for k, v in d.items() )
A : int ='<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[F'{k}</w>']
A : List[Any] =d[k] # restore
return da
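# Quick illustration of the rewrite (hypothetical values), assuming the
# de-obfuscated helper name used at the call site below:
#     >>> rewrite_dict_keys({"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le@@": 5, "er": 7})
#     {'le': 5, 'er</w>': 7, '<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}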
def A__ ( lowercase: Optional[int], lowercase: Optional[Any] ) -> str:
# prep
if not os.path.exists(lowercase ):
raise ValueError(F'path {biogpt_checkpoint_path} does not exist!' )
os.makedirs(lowercase, exist_ok=lowercase )
print(F'Writing results to {pytorch_dump_folder_path}' )
# handle various types of models
A : List[str] =os.path.join(lowercase, 'checkpoint.pt' )
if not os.path.isfile(lowercase ):
raise ValueError(F'path to the file {checkpoint_file} does not exist!' )
A : Optional[Any] =torch.load(lowercase, map_location='cpu' )
A : Any =chkpt['cfg']['model']
# dicts
A : Any =os.path.join(lowercase, 'dict.txt' )
if not os.path.isfile(lowercase ):
raise ValueError(F'path to the file {dict_file} does not exist!' )
A : Dict =Dictionary.load(lowercase )
A : Optional[Any] =rewrite_dict_keys(src_dict.indices )
A : Tuple =len(lowercase )
A : Any =os.path.join(lowercase, VOCAB_FILES_NAMES['vocab_file'] )
print(F'Generating {src_vocab_file} of {src_vocab_size} records' )
with open(lowercase, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(lowercase, ensure_ascii=lowercase, indent=lowercase ) )
# merges_file (bpecodes)
A : List[str] =os.path.join(lowercase, 'bpecodes' )
if not os.path.isfile(lowercase ):
raise ValueError(F'path to the file {bpecodes_file} does not exist!' )
A : List[str] =os.path.join(lowercase, VOCAB_FILES_NAMES['merges_file'] )
shutil.copyfile(lowercase, lowercase )
# model config
A : Tuple =os.path.join(lowercase, 'config.json' )
A : Tuple ={
'activation_dropout': args['activation_dropout'],
'architectures': ['BioGptForCausalLM'],
'attention_probs_dropout_prob': args['attention_dropout'],
'bos_token_id': 0,
'eos_token_id': 2,
'hidden_act': args['activation_fn'],
'hidden_dropout_prob': args['dropout'],
'hidden_size': args['decoder_embed_dim'],
'initializer_range': 0.02,
'intermediate_size': args['decoder_ffn_embed_dim'],
'layer_norm_eps': 1e-1_2,
'layerdrop': args['decoder_layerdrop'],
'max_position_embeddings': args['max_target_positions'],
'model_type': 'biogpt',
'num_attention_heads': args['decoder_attention_heads'],
'num_hidden_layers': args['decoder_layers'],
'pad_token_id': 1,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_decoder_input_output_embed'],
'vocab_size': src_vocab_size,
}
# good hparam defaults to start with
print(F'Generating {biogpt_model_config_file}' )
with open(lowercase, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(lowercase, ensure_ascii=lowercase, indent=lowercase ) )
# tokenizer config
A : int =os.path.join(lowercase, lowercase )
A : List[str] ={
'bos_token': '<s>',
'eos_token': '</s>',
'model_max_length': 1_024,
'pad_token': '<pad>',
'special_tokens_map_file': None,
'tokenizer_class': 'BioGptTokenizer',
'unk_token': '<unk>',
}
print(F'Generating {biogpt_tokenizer_config_file}' )
with open(lowercase, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(lowercase, ensure_ascii=lowercase, indent=lowercase ) )
# model
A : List[Any] =chkpt['model']
# remove unneeded keys
A : List[Any] =[
'decoder.version',
]
for k in ignore_keys:
model_state_dict.pop(lowercase, lowercase )
A : str =list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith('output_projection.weight' ):
A : Union[str, Any] =model_state_dict.pop(lowercase )
else:
A : List[str] =model_state_dict.pop(lowercase )
A : Any =BioGptConfig.from_pretrained(lowercase )
A : str =BioGptForCausalLM(lowercase )
# check that it loads ok
model_new.load_state_dict(lowercase )
# save
A : Tuple =os.path.join(lowercase, lowercase )
print(F'Generating {pytorch_weights_dump_path}' )
torch.save(lowercase, lowercase )
print('Conversion is done!' )
if __name__ == "__main__":
_lowercase : Union[str, Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--biogpt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_lowercase : List[Any] =parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
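# After conversion, the dump can be reloaded with the released classes (a
# sketch; "./biogpt_hf" stands in for --pytorch_dump_folder_path):
#     from transformers import BioGptForCausalLM, BioGptTokenizer
#     tokenizer = BioGptTokenizer.from_pretrained("./biogpt_hf")
#     model = BioGptForCausalLM.from_pretrained("./biogpt_hf")
#     ids = tokenizer("COVID-19 is", return_tensors="pt").input_ids
#     print(tokenizer.decode(model.generate(ids, max_new_tokens=20)[0]))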
| 661 | 0 |
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
_lowercase : Dict =models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.Conv2D(3_2, (3, 3), input_shape=(6_4, 6_4, 3), activation='''relu''')
)
# Step 2 - Pooling
classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.Conv2D(3_2, (3, 3), activation='''relu'''))
classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=1_2_8, activation='''relu'''))
classifier.add(layers.Dense(units=1, activation='''sigmoid'''))
# Compiling the CNN
classifier.compile(
optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy''']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
_lowercase : str =tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 2_5_5, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
_lowercase : Tuple =tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 2_5_5)
_lowercase : List[Any] =train_datagen.flow_from_directory(
'''dataset/training_set''', target_size=(6_4, 6_4), batch_size=3_2, class_mode='''binary'''
)
_lowercase : Tuple =test_datagen.flow_from_directory(
'''dataset/test_set''', target_size=(6_4, 6_4), batch_size=3_2, class_mode='''binary'''
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=3_0, validation_data=test_set
)
classifier.save('''cnn.h5''')
# Part 3 - Making new predictions
_lowercase : Tuple =tf.keras.preprocessing.image.load_img(
'''dataset/single_prediction/image.png''', target_size=(6_4, 6_4)
)
_lowercase : Optional[int] =tf.keras.preprocessing.image.img_to_array(test_image)
_lowercase : Tuple =np.expand_dims(test_image, axis=0)
_lowercase : Optional[Any] =classifier.predict(test_image)
# training_set.class_indices
# model.predict returns a sigmoid probability, so threshold at 0.5 rather than
# testing for exact equality with 0 or 1
if result[0][0] < 0.5:
_lowercase : Dict ="Normal"
else:
_lowercase : Tuple ="Abnormality detected"
| 713 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
_lowercase : str =False
_lowercase : Optional[Any] =False
def A__ ( lowercase: Namespace ) -> Optional[int]:
return TrainCommand(lowercase )
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
@staticmethod
def SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ : ArgumentParser ) -> Dict:
A : Optional[Any] =parser.add_parser('train' , help='CLI tool to train a model on a task.' )
train_parser.add_argument(
'--train_data' , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' , )
train_parser.add_argument(
'--column_label' , type=SCREAMING_SNAKE_CASE__ , default=0 , help='Column of the dataset csv file with example labels.' )
train_parser.add_argument(
'--column_text' , type=SCREAMING_SNAKE_CASE__ , default=1 , help='Column of the dataset csv file with example texts.' )
train_parser.add_argument(
'--column_id' , type=SCREAMING_SNAKE_CASE__ , default=2 , help='Column of the dataset csv file with example ids.' )
train_parser.add_argument(
'--skip_first_row' , action='store_true' , help='Skip the first row of the csv file (headers).' )
train_parser.add_argument('--validation_data' , type=SCREAMING_SNAKE_CASE__ , default='' , help='path to validation dataset.' )
train_parser.add_argument(
'--validation_split' , type=SCREAMING_SNAKE_CASE__ , default=0.1 , help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' , )
train_parser.add_argument('--output' , type=SCREAMING_SNAKE_CASE__ , default='./' , help='path to save the trained model.' )
train_parser.add_argument(
'--task' , type=SCREAMING_SNAKE_CASE__ , default='text_classification' , help='Task to train the model on.' )
train_parser.add_argument(
'--model' , type=SCREAMING_SNAKE_CASE__ , default='bert-base-uncased' , help='Model\'s name or path to stored model.' )
train_parser.add_argument('--train_batch_size' , type=SCREAMING_SNAKE_CASE__ , default=32 , help='Batch size for training.' )
train_parser.add_argument('--valid_batch_size' , type=SCREAMING_SNAKE_CASE__ , default=64 , help='Batch size for validation.' )
train_parser.add_argument('--learning_rate' , type=SCREAMING_SNAKE_CASE__ , default=3e-5 , help='Learning rate.' )
train_parser.add_argument('--adam_epsilon' , type=SCREAMING_SNAKE_CASE__ , default=1e-08 , help='Epsilon for Adam optimizer.' )
train_parser.set_defaults(func=SCREAMING_SNAKE_CASE__ )
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Namespace ) -> List[Any]:
A : Optional[int] =logging.get_logger('transformers-cli/training' )
A : Dict ='tf' if is_tf_available() else 'torch'
os.makedirs(args.output , exist_ok=SCREAMING_SNAKE_CASE__ )
A : Optional[Any] =args.output
A : List[str] =args.column_label
A : int =args.column_text
A : Union[str, Any] =args.column_id
self.logger.info(f'Loading {args.task} pipeline for {args.model}' )
if args.task == "text_classification":
A : Optional[Any] =TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f'Loading dataset from {args.train_data}' )
A : Tuple =Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
A : Dict =None
if args.validation_data:
self.logger.info(f'Loading validation dataset from {args.validation_data}' )
A : List[Any] =Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
A : Optional[Any] =args.validation_split
A : str =args.train_batch_size
A : Any =args.valid_batch_size
A : Dict =args.learning_rate
A : List[str] =args.adam_epsilon
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Union[str, Any]:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def SCREAMING_SNAKE_CASE_ ( self : int ) -> List[str]:
raise NotImplementedError
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> str:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
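# Invocation sketch, assuming the `transformers-cli` entry point that registers
# this command (paths are placeholders; a TF install is required because
# run_torch() above raises NotImplementedError):
#     transformers-cli train --train_data ./train.tsv --column_label 0 \
#         --column_text 1 --model bert-base-uncased --output ./trained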
| 661 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Tuple =logging.get_logger(__name__)
_lowercase : Union[str, Any] ={
'''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''',
'''BridgeTower/bridgetower-base-itm-mlm''': (
'''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE_ ( __lowercase ):
'''simple docstring'''
lowercase : List[Any] = 'bridgetower_vision_model'
def __init__( self : Any , SCREAMING_SNAKE_CASE__ : Tuple=7_68 , SCREAMING_SNAKE_CASE__ : Dict=12 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE__ : Tuple=16 , SCREAMING_SNAKE_CASE__ : List[Any]=2_88 , SCREAMING_SNAKE_CASE__ : List[Any]=1 , SCREAMING_SNAKE_CASE__ : Tuple=1e-05 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , **SCREAMING_SNAKE_CASE__ : Tuple , ) -> Optional[int]:
super().__init__(**__A )
A : Dict =hidden_size
A : Optional[Any] =num_hidden_layers
A : Optional[int] =num_channels
A : Optional[Any] =patch_size
A : List[Any] =image_size
A : Optional[Any] =initializer_factor
A : Optional[Any] =layer_norm_eps
A : Optional[int] =stop_gradient
A : Union[str, Any] =share_layernorm
A : str =remove_last_layer
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : str ) -> "PretrainedConfig":
A , A : str =cls.get_config_dict(__A , **__A )
if config_dict.get('model_type' ) == "bridgetower":
A : str =config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(__A , **__A )
class SCREAMING_SNAKE_CASE_ ( __lowercase ):
'''simple docstring'''
lowercase : Optional[int] = 'bridgetower_text_model'
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str]=5_02_65 , SCREAMING_SNAKE_CASE__ : List[Any]=7_68 , SCREAMING_SNAKE_CASE__ : Dict=12 , SCREAMING_SNAKE_CASE__ : Optional[Any]=12 , SCREAMING_SNAKE_CASE__ : List[str]=1 , SCREAMING_SNAKE_CASE__ : Any=30_72 , SCREAMING_SNAKE_CASE__ : int="gelu" , SCREAMING_SNAKE_CASE__ : str=0.1 , SCREAMING_SNAKE_CASE__ : Tuple=0.1 , SCREAMING_SNAKE_CASE__ : List[str]=5_14 , SCREAMING_SNAKE_CASE__ : List[str]=1 , SCREAMING_SNAKE_CASE__ : Tuple=1e-05 , SCREAMING_SNAKE_CASE__ : int=1 , SCREAMING_SNAKE_CASE__ : Any=0 , SCREAMING_SNAKE_CASE__ : int=2 , SCREAMING_SNAKE_CASE__ : Optional[Any]="absolute" , SCREAMING_SNAKE_CASE__ : List[Any]=True , **SCREAMING_SNAKE_CASE__ : List[Any] , ) -> Dict:
super().__init__(**__A )
A : int =vocab_size
A : Optional[Any] =hidden_size
A : Any =num_hidden_layers
A : Union[str, Any] =num_attention_heads
A : Any =hidden_act
A : List[str] =initializer_factor
A : Any =intermediate_size
A : Union[str, Any] =hidden_dropout_prob
A : List[Any] =attention_probs_dropout_prob
A : int =max_position_embeddings
A : List[str] =type_vocab_size
A : Optional[int] =layer_norm_eps
A : Union[str, Any] =position_embedding_type
A : Optional[int] =use_cache
A : int =pad_token_id
A : int =bos_token_id
A : str =eos_token_id
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : int , SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> "PretrainedConfig":
A , A : Any =cls.get_config_dict(__A , **__A )
if config_dict.get('model_type' ) == "bridgetower":
A : Optional[int] =config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(__A , **__A )
class SCREAMING_SNAKE_CASE_ ( __lowercase ):
'''simple docstring'''
lowercase : Dict = 'bridgetower'
def __init__( self : int , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Optional[int]="gelu" , SCREAMING_SNAKE_CASE__ : List[str]=7_68 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1 , SCREAMING_SNAKE_CASE__ : Tuple=1e-05 , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : str="add" , SCREAMING_SNAKE_CASE__ : Tuple=12 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=6 , SCREAMING_SNAKE_CASE__ : Optional[int]=False , SCREAMING_SNAKE_CASE__ : Tuple=False , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Dict=None , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> Any:
# TODO: remove this once the Hub files are updated.
A : List[str] =kwargs.pop('text_config_dict' , __A )
A : Optional[Any] =kwargs.pop('vision_config_dict' , __A )
super().__init__(**__A )
A : Any =share_cross_modal_transformer_layers
A : Optional[Any] =hidden_act
A : int =hidden_size
A : Optional[int] =initializer_factor
A : Tuple =layer_norm_eps
A : Optional[Any] =share_link_tower_layers
A : Union[str, Any] =link_tower_type
A : Optional[int] =num_attention_heads
A : List[str] =num_hidden_layers
A : str =tie_word_embeddings
A : Optional[Any] =init_layernorm_from_vision_encoder
if text_config is None:
A : Optional[int] ={}
logger.info('`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.' )
if vision_config is None:
A : Optional[int] ={}
logger.info('`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.' )
A : int =BridgeTowerTextConfig(**__A )
A : Tuple =BridgeTowerVisionConfig(**__A )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : List[str] ) -> List[Any]:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__A )
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Optional[Any]:
A : Tuple =copy.deepcopy(self.__dict__ )
A : Any =self.text_config.to_dict()
A : List[Any] =self.vision_config.to_dict()
A : Union[str, Any] =self.__class__.model_type
return output
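# Composition sketch against the released library, where these classes ship
# under their public names; the keyword values are defaults made explicit.
if __name__ == "__main__":
    from transformers import BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig

    cfg = BridgeTowerConfig.from_text_vision_configs(
        BridgeTowerTextConfig(hidden_size=768), BridgeTowerVisionConfig(hidden_size=768)
    )
    print(cfg.to_dict()["text_config"]["hidden_size"])  # 768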
| 714 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple=7 , SCREAMING_SNAKE_CASE__ : Dict=3 , SCREAMING_SNAKE_CASE__ : Tuple=30 , SCREAMING_SNAKE_CASE__ : int=4_00 , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : List[Any]=True , SCREAMING_SNAKE_CASE__ : Dict=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE__ : str=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Any=1 / 2_55 , SCREAMING_SNAKE_CASE__ : int=True , ) -> Optional[int]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
A : Optional[Any] =size if size is not None else {'shortest_edge': 18, 'longest_edge': 13_33}
A : Union[str, Any] =parent
A : Union[str, Any] =batch_size
A : Union[str, Any] =num_channels
A : int =min_resolution
A : List[Any] =max_resolution
A : Dict =do_resize
A : Tuple =size
A : List[str] =do_normalize
A : List[Any] =image_mean
A : Dict =image_std
A : Any =do_rescale
A : List[str] =rescale_factor
A : Optional[Any] =do_pad
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> List[Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict=False ) -> Dict:
if not batched:
A : Any =image_inputs[0]
if isinstance(SCREAMING_SNAKE_CASE__ , Image.Image ):
A , A : Union[str, Any] =image.size
else:
A , A : Tuple =image.shape[1], image.shape[2]
if w < h:
A : Any =int(self.size['shortest_edge'] * h / w )
A : Any =self.size['shortest_edge']
elif w > h:
A : Dict =self.size['shortest_edge']
A : Dict =int(self.size['shortest_edge'] * w / h )
else:
A : List[str] =self.size['shortest_edge']
A : Dict =self.size['shortest_edge']
else:
A : List[Any] =[]
for image in image_inputs:
A , A : int =self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
A : str =max(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : item[0] )[0]
A : Tuple =max(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : item[1] )[1]
return expected_height, expected_width
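# The resize rule encoded above, restated standalone for clarity (not used by
# the tests): scale the shorter image side to `shortest_edge` and the longer
# side proportionally.
def _shortest_edge_resize(h, w, shortest_edge=18):
    if w < h:
        return int(shortest_edge * h / w), shortest_edge
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge


assert _shortest_edge_resize(400, 200) == (36, 18)  # portrait: height scales up
assert _shortest_edge_resize(200, 400) == (18, 36)  # landscape: width scales up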
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
lowercase : List[Any] = ConditionalDetrImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Tuple:
A : str =ConditionalDetrImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> int:
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Tuple:
A : Tuple =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'image_mean' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'image_std' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'do_normalize' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'do_resize' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'size' ) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> int:
A : int =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 13_33} )
self.assertEqual(image_processor.do_pad , SCREAMING_SNAKE_CASE__ )
A : str =self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=SCREAMING_SNAKE_CASE__ )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Optional[int]:
pass
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Union[str, Any]:
# Initialize image_processing
A : Union[str, Any] =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A : Tuple =prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
# Test not batched input
A : List[Any] =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
A , A : List[str] =self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A , A : Union[str, Any] =self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ )
A : Union[str, Any] =image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> int:
# Initialize image_processing
A : Optional[Any] =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A : str =prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , numpify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , np.ndarray )
# Test not batched input
A : Tuple =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
A , A : Any =self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A : Tuple =image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values
A , A : Optional[int] =self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> List[str]:
# Initialize image_processing
A : Optional[Any] =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A : Any =prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , torchify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , torch.Tensor )
# Test not batched input
A : Optional[int] =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
A , A : Tuple =self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A : Tuple =image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values
A , A : int =self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Union[str, Any]:
# prepare image and target
A : Union[str, Any] =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
A : List[Any] =json.loads(f.read() )
A : Any ={'image_id': 3_97_69, 'annotations': target}
# encode them
A : str =ConditionalDetrImageProcessor.from_pretrained('microsoft/conditional-detr-resnet-50' )
A : Any =image_processing(images=SCREAMING_SNAKE_CASE__ , annotations=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
# verify pixel values
A : Optional[Any] =torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , SCREAMING_SNAKE_CASE__ )
A : List[str] =torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
# verify area
A : Dict =torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , SCREAMING_SNAKE_CASE__ ) )
# verify boxes
A : str =torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , SCREAMING_SNAKE_CASE__ )
A : Union[str, Any] =torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , SCREAMING_SNAKE_CASE__ , atol=1e-3 ) )
# verify image_id
A : Dict =torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , SCREAMING_SNAKE_CASE__ ) )
# verify is_crowd
A : List[str] =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , SCREAMING_SNAKE_CASE__ ) )
# verify class_labels
A : Union[str, Any] =torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , SCREAMING_SNAKE_CASE__ ) )
# verify orig_size
A : List[Any] =torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , SCREAMING_SNAKE_CASE__ ) )
# verify size
A : int =torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , SCREAMING_SNAKE_CASE__ ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Tuple:
# prepare image, target and masks_path
A : List[str] =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
A : Optional[int] =json.loads(f.read() )
A : int ={'file_name': '000000039769.png', 'image_id': 3_97_69, 'segments_info': target}
A : Optional[Any] =pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
A : List[Any] =ConditionalDetrImageProcessor(format='coco_panoptic' )
A : Union[str, Any] =image_processing(images=SCREAMING_SNAKE_CASE__ , annotations=SCREAMING_SNAKE_CASE__ , masks_path=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
# verify pixel values
A : Dict =torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , SCREAMING_SNAKE_CASE__ )
A : Dict =torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
# verify area
A : Optional[int] =torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , SCREAMING_SNAKE_CASE__ ) )
# verify boxes
A : List[Any] =torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , SCREAMING_SNAKE_CASE__ )
A : Any =torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , SCREAMING_SNAKE_CASE__ , atol=1e-3 ) )
# verify image_id
A : List[Any] =torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , SCREAMING_SNAKE_CASE__ ) )
# verify is_crowd
A : Any =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , SCREAMING_SNAKE_CASE__ ) )
# verify class_labels
A : str =torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , SCREAMING_SNAKE_CASE__ ) )
# verify masks
A : int =82_28_73
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , SCREAMING_SNAKE_CASE__ )
# verify orig_size
A : Any =torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , SCREAMING_SNAKE_CASE__ ) )
# verify size
A : str =torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , SCREAMING_SNAKE_CASE__ ) )
| 661 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Optional[Any] =logging.get_logger(__name__)
_lowercase : Any ={
'''MIT/ast-finetuned-audioset-10-10-0.4593''': (
'''https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ ):
'''simple docstring'''
lowercase : List[Any] = "audio-spectrogram-transformer"
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=7_68 , SCREAMING_SNAKE_CASE__ : Tuple=12 , SCREAMING_SNAKE_CASE__ : Any=12 , SCREAMING_SNAKE_CASE__ : str=30_72 , SCREAMING_SNAKE_CASE__ : str="gelu" , SCREAMING_SNAKE_CASE__ : int=0.0 , SCREAMING_SNAKE_CASE__ : str=0.0 , SCREAMING_SNAKE_CASE__ : int=0.0_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1e-12 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=16 , SCREAMING_SNAKE_CASE__ : List[Any]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=10 , SCREAMING_SNAKE_CASE__ : List[str]=10 , SCREAMING_SNAKE_CASE__ : int=10_24 , SCREAMING_SNAKE_CASE__ : List[Any]=1_28 , **SCREAMING_SNAKE_CASE__ : Tuple , ) -> Optional[int]:
super().__init__(**__A )
A : Optional[Any] =hidden_size
A : str =num_hidden_layers
A : List[str] =num_attention_heads
A : Optional[int] =intermediate_size
A : List[str] =hidden_act
A : Any =hidden_dropout_prob
A : Any =attention_probs_dropout_prob
A : Dict =initializer_range
A : List[str] =layer_norm_eps
A : List[Any] =patch_size
A : Optional[Any] =qkv_bias
A : List[Any] =frequency_stride
A : List[str] =time_stride
A : Tuple =max_length
A : Dict =num_mel_bins
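# Instantiation sketch via the public export (`ASTConfig` in `transformers`);
# the keyword values below just spell out this class's defaults.
if __name__ == "__main__":
    from transformers import ASTConfig

    cfg = ASTConfig(num_mel_bins=128, max_length=1024, frequency_stride=10, time_stride=10)
    print(cfg.model_type)  # "audio-spectrogram-transformer"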
| 715 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_lowercase : List[Any] =1_6
_lowercase : Union[str, Any] =3_2
def A__ ( lowercase: Accelerator, lowercase: int = 16, lowercase: str = "bert-base-cased" ) -> Optional[int]:
A : List[Any] =AutoTokenizer.from_pretrained(lowercase )
A : Any =load_dataset('glue', 'mrpc' )
def tokenize_function(lowercase: Any ):
# max_length=None => use the model max length (it's actually the default)
A : List[str] =tokenizer(examples['sentence1'], examples['sentence2'], truncation=lowercase, max_length=lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
A : Any =datasets.map(
lowercase, batched=lowercase, remove_columns=['idx', 'sentence1', 'sentence2'], load_from_cache_file=lowercase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A : Dict =tokenized_datasets.rename_column('label', 'labels' )
def collate_fn(lowercase: Optional[int] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowercase, padding='max_length', max_length=128, return_tensors='pt' )
return tokenizer.pad(lowercase, padding='longest', return_tensors='pt' )
# Instantiate dataloaders.
A : Union[str, Any] =DataLoader(
tokenized_datasets['train'], shuffle=lowercase, collate_fn=lowercase, batch_size=lowercase )
A : str =DataLoader(
tokenized_datasets['validation'], shuffle=lowercase, collate_fn=lowercase, batch_size=lowercase )
return train_dataloader, eval_dataloader
def A__ ( lowercase: Dict, lowercase: Optional[int], lowercase: Any, lowercase: str ) -> Tuple:
model.eval()
A : Tuple =0
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
A : Tuple =model(**lowercase )
A : Tuple =outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
A , A : Union[str, Any] =accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(lowercase ) - 1:
A : List[Any] =predictions[: len(eval_dataloader.dataset ) - samples_seen]
A : Optional[int] =references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=lowercase, references=lowercase, )
A : Union[str, Any] =metric.compute()
return eval_metric["accuracy"]
def A__ ( lowercase: Union[str, Any], lowercase: Dict ) -> List[str]:
# Initialize accelerator
A : Optional[int] =Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A : int =config['lr']
A : Optional[Any] =int(config['num_epochs'] )
A : Union[str, Any] =int(config['seed'] )
A : List[str] =int(config['batch_size'] )
A : Optional[Any] =args.model_name_or_path
set_seed(lowercase )
A , A : str =get_dataloaders(lowercase, lowercase, lowercase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A : List[str] =AutoModelForSequenceClassification.from_pretrained(lowercase, return_dict=lowercase )
# Instantiate optimizer
A : Any =(
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
A : List[str] =optimizer_cls(params=model.parameters(), lr=lowercase )
if accelerator.state.deepspeed_plugin is not None:
A : Optional[int] =accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
A : Dict =1
A : Union[str, Any] =(len(lowercase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
A : List[Any] =get_linear_schedule_with_warmup(
optimizer=lowercase, num_warmup_steps=0, num_training_steps=lowercase, )
else:
A : List[str] =DummyScheduler(lowercase, total_num_steps=lowercase, warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A , A , A , A , A : Optional[int] =accelerator.prepare(
lowercase, lowercase, lowercase, lowercase, lowercase )
# We need to keep track of how many total steps we have iterated over
A : Tuple =0
# We also need to keep track of the stating epoch so files are named properly
A : List[str] =0
A : Tuple =evaluate.load('glue', 'mrpc' )
A : Optional[int] =num_epochs
if args.partial_train_epoch is not None:
A : Dict =args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
A : List[Any] =args.resume_from_checkpoint.split('epoch_' )[1]
A : List[Any] =''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
A : Union[str, Any] =int(lowercase ) + 1
A : List[str] =evaluation_loop(lowercase, lowercase, lowercase, lowercase )
accelerator.print('resumed checkpoint performance:', lowercase )
accelerator.print('resumed checkpoint\'s scheduler\'s lr:', lr_scheduler.get_lr()[0] )
accelerator.print('resumed optimizer\'s lr:', optimizer.param_groups[0]['lr'] )
with open(os.path.join(args.output_dir, F'state_{starting_epoch-1}.json' ), 'r' ) as f:
A : Union[str, Any] =json.load(lowercase )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
A : str ={}
for epoch in range(lowercase, lowercase ):
model.train()
for step, batch in enumerate(lowercase ):
A : Tuple =model(**lowercase )
A : List[Any] =outputs.loss
A : Any =loss / gradient_accumulation_steps
accelerator.backward(lowercase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
A : Union[str, Any] =F'epoch_{epoch}'
A : Optional[Any] =os.path.join(args.output_dir, lowercase )
accelerator.save_state(lowercase )
A : Optional[Any] =evaluation_loop(lowercase, lowercase, lowercase, lowercase )
A : Dict =accuracy
A : Optional[Any] =lr_scheduler.get_lr()[0]
A : Any =optimizer.param_groups[0]['lr']
A : str =epoch
A : Dict =overall_step
accelerator.print(F'epoch {epoch}:', lowercase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir, F'state_{epoch}.json' ), 'w' ) as f:
json.dump(lowercase, lowercase )
def A__ ( ) -> Optional[int]:
A : Optional[int] =argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path', type=lowercase, default='bert-base-cased', help='Path to pretrained model or model identifier from huggingface.co/models.', required=lowercase, )
parser.add_argument(
'--output_dir', type=lowercase, default='.', help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.', )
parser.add_argument(
'--resume_from_checkpoint', type=lowercase, default=lowercase, help='If the training should continue from a checkpoint folder.', )
parser.add_argument(
'--partial_train_epoch', type=lowercase, default=lowercase, help='If passed, the training will stop after this number of epochs.', )
parser.add_argument(
'--num_epochs', type=lowercase, default=2, help='Number of train epochs.', )
A : str =parser.parse_args()
A : Optional[int] ={'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(lowercase, lowercase )
if __name__ == "__main__":
main()
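# Launch sketch (the script file name is a placeholder); checkpoints land in
# --output_dir as epoch_0, epoch_1, ... and can be resumed from directly:
#     accelerate launch checkpointing.py --num_epochs 2 --output_dir ./ckpts
#     accelerate launch checkpointing.py --output_dir ./ckpts --resume_from_checkpoint ./ckpts/epoch_0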
| 661 | 0 |
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase : Optional[int] =logging.get_logger(__name__)
def A__ ( lowercase: int, lowercase: Tuple ) -> Dict:
'''simple docstring'''
A : Any =[]
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'encoder.deit.blocks.{i}.norm1.weight', F'encoder.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'encoder.deit.blocks.{i}.norm1.bias', F'encoder.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.attn.proj.weight', F'encoder.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.attn.proj.bias', F'encoder.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.norm2.weight', F'encoder.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'encoder.deit.blocks.{i}.norm2.bias', F'encoder.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.mlp.fc1.weight', F'encoder.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.mlp.fc1.bias', F'encoder.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.mlp.fc2.weight', F'encoder.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'encoder.deit.blocks.{i}.mlp.fc2.bias', F'encoder.encoder.layer.{i}.output.dense.bias') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('encoder.deit.cls_token', 'encoder.embeddings.cls_token'),
('encoder.deit.pos_embed', 'encoder.embeddings.position_embeddings'),
('encoder.deit.patch_embed.proj.weight', 'encoder.embeddings.patch_embeddings.projection.weight'),
('encoder.deit.patch_embed.proj.bias', 'encoder.embeddings.patch_embeddings.projection.bias'),
('encoder.deit.norm.weight', 'encoder.layernorm.weight'),
('encoder.deit.norm.bias', 'encoder.layernorm.bias'),
] )
return rename_keys
def A__ ( lowercase: Union[str, Any], lowercase: Tuple ) -> str:
'''simple docstring'''
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
A : List[str] =state_dict.pop(F'encoder.deit.blocks.{i}.attn.qkv.weight' )
A : List[Any] =in_proj_weight[
: encoder_config.hidden_size, :
]
A : Union[str, Any] =in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
A : Optional[Any] =in_proj_weight[
-encoder_config.hidden_size :, :
]
def A__ ( lowercase: str, lowercase: Dict, lowercase: int ) -> Tuple:
'''simple docstring'''
A : Union[str, Any] =dct.pop(_UpperCAmelCase )
A : List[str] =val
def A__ ( lowercase: str ) -> Tuple:
'''simple docstring'''
if "handwritten" in checkpoint_url:
A : str ='https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg' # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
A : int ='https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'
A : Dict =Image.open(requests.get(_UpperCAmelCase, stream=_UpperCAmelCase ).raw ).convert('RGB' )
return im
@torch.no_grad()
def A__ ( lowercase: List[str], lowercase: Any ) -> Union[str, Any]:
'''simple docstring'''
A : Union[str, Any] =ViTConfig(image_size=384, qkv_bias=_UpperCAmelCase )
A : Any =TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
A : Union[str, Any] =768
elif "large" in checkpoint_url:
# use ViT-large encoder
A : Optional[Any] =1_024
A : Any =4_096
A : Optional[int] =24
A : List[Any] =16
A : Optional[Any] =1_024
else:
raise ValueError('Should either find \'base\' or \'large\' in checkpoint URL' )
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
A : Any =False
A : str ='relu'
A : Any =1_024
A : List[str] =True
A : str =False
A : List[Any] =False
# load HuggingFace model
A : Any =ViTModel(_UpperCAmelCase, add_pooling_layer=_UpperCAmelCase )
A : Dict =TrOCRForCausalLM(_UpperCAmelCase )
A : Any =VisionEncoderDecoderModel(encoder=_UpperCAmelCase, decoder=_UpperCAmelCase )
model.eval()
# load state_dict of original model, rename some keys
A : List[str] =torch.hub.load_state_dict_from_url(_UpperCAmelCase, map_location='cpu', check_hash=_UpperCAmelCase )['model']
A : List[str] =create_rename_keys(_UpperCAmelCase, _UpperCAmelCase )
for src, dest in rename_keys:
rename_key(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
read_in_q_k_v(_UpperCAmelCase, _UpperCAmelCase )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
A : Tuple =state_dict.pop(_UpperCAmelCase )
if key.startswith('decoder' ) and "output_projection" not in key:
A : Optional[int] =val
else:
A : int =val
# load state dict
model.load_state_dict(_UpperCAmelCase )
# Check outputs on an image
A : Tuple =ViTImageProcessor(size=encoder_config.image_size )
A : Optional[int] =RobertaTokenizer.from_pretrained('roberta-large' )
A : str =TrOCRProcessor(_UpperCAmelCase, _UpperCAmelCase )
A : Any =processor(images=prepare_img(_UpperCAmelCase ), return_tensors='pt' ).pixel_values
# verify logits
A : Tuple =torch.tensor([[model.config.decoder.decoder_start_token_id]] )
A : int =model(pixel_values=_UpperCAmelCase, decoder_input_ids=_UpperCAmelCase )
A : Dict =outputs.logits
A : int =torch.Size([1, 1, 50_265] )
if "trocr-base-handwritten" in checkpoint_url:
A : Optional[Any] =torch.tensor(
[-1.45_02, -4.66_83, -0.53_47, -2.92_91, 9.14_35, -3.05_71, 8.97_64, 1.75_60, 8.73_58, -1.53_11] )
elif "trocr-large-handwritten" in checkpoint_url:
A : Optional[Any] =torch.tensor(
[-2.64_37, -1.31_29, -2.25_96, -5.34_55, 6.35_39, 1.76_04, 5.49_91, 1.47_02, 5.61_13, 2.01_70] )
elif "trocr-base-printed" in checkpoint_url:
A : Tuple =torch.tensor(
[-5.68_16, -5.83_88, 1.13_98, -6.90_34, 6.85_05, -2.43_93, 1.22_84, -1.02_32, -1.96_61, -3.92_10] )
elif "trocr-large-printed" in checkpoint_url:
A : Any =torch.tensor(
[-6.01_62, -7.09_59, 4.41_55, -5.10_63, 7.04_68, -3.16_31, 2.64_66, -0.30_81, -0.81_06, -1.75_35] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10], _UpperCAmelCase, atol=1e-3 ), "First elements of logits not as expected"
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(_UpperCAmelCase )
print(F'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
_lowercase : int =argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
_lowercase : Optional[int] =parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
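# Verifying a converted dump (a sketch; "./trocr_hf" stands in for
# --pytorch_dump_folder_path, and prepare_img() is the helper defined above):
#     processor = TrOCRProcessor.from_pretrained("./trocr_hf")
#     model = VisionEncoderDecoderModel.from_pretrained("./trocr_hf")
#     pixel_values = processor(images=prepare_img("trocr-base-handwritten"), return_tensors="pt").pixel_values
#     print(processor.batch_decode(model.generate(pixel_values), skip_special_tokens=True)[0])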
| 716 |
def A__ ( lowercase: int ) -> int:
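"""
Count set bits with Brian Kernighan's trick: `number &= number - 1` clears the
lowest set bit, so the loop runs once per 1-bit instead of once per bit.

>>> A__(0)
0
>>> A__(13)
3
>>> A__(256)
1
"""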
if not isinstance(lowercase, lowercase ) or number < 0:
raise ValueError('Input must be a non-negative integer' )
A : Any =0
while number:
# This way we arrive at next set bit (next 1) instead of looping
# through each bit and checking for 1s hence the
# loop won't run 32 times it will only run the number of `1` times
number &= number - 1
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 661 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase : Optional[Any] ={'''configuration_glpn''': ['''GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GLPNConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Union[str, Any] =['''GLPNFeatureExtractor''']
_lowercase : str =['''GLPNImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[Any] =[
'''GLPN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GLPNForDepthEstimation''',
'''GLPNLayer''',
'''GLPNModel''',
'''GLPNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
_lowercase : Union[str, Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
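# Note (sketch, assuming the standard transformers lazy-import behavior): with the
# `_LazyModule` above, an import such as
# `from transformers.models.glpn import GLPNForDepthEstimation`
# only triggers the heavy torch import on first attribute access, keeping the
# top-level `import transformers` cheap.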
| 717 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def A__ ( *lowercase: Tuple, lowercase: Optional[Union[Dict, Any]] = None, lowercase: Dict=True, lowercase: Any=2 ) -> List[Any]:
from .. import __version__
A : Optional[Any] =take_from
A : Union[str, Any] =()
if not isinstance(args[0], lowercase ):
A : List[str] =(args,)
for attribute, version_name, message in args:
if version.parse(version.parse(lowercase ).base_version ) >= version.parse(lowercase ):
raise ValueError(
F'The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''
F' version {__version__} is >= {version_name}' )
A : Tuple =None
if isinstance(lowercase, lowercase ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(lowercase ),)
A : Union[str, Any] =F'The `{attribute}` argument is deprecated and will be removed in version {version_name}.'
elif hasattr(lowercase, lowercase ):
values += (getattr(lowercase, lowercase ),)
A : Optional[Any] =F'The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'
elif deprecated_kwargs is None:
A : List[Any] =F'`{attribute}` is deprecated and will be removed in version {version_name}.'
if warning is not None:
A : List[Any] =warning + ' ' if standard_warn else ''
warnings.warn(warning + message, lowercase, stacklevel=lowercase )
if isinstance(lowercase, lowercase ) and len(lowercase ) > 0:
A : Any =inspect.getouterframes(inspect.currentframe() )[1]
A : int =call_frame.filename
A : int =call_frame.lineno
A : Optional[int] =call_frame.function
A , A : int =next(iter(deprecated_kwargs.items() ) )
raise TypeError(F'{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`' )
if len(lowercase ) == 0:
return
elif len(lowercase ) == 1:
return values[0]
return values
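# Hedged usage sketch (this helper mirrors diffusers' `deprecate`; the name
# `deprecate` and the argument values below are illustrative assumptions):
# value = deprecate("old_kwarg", "0.30.0", "Use `new_kwarg` instead.", take_from=kwargs)
# pops `old_kwarg` from `kwargs`, emits a deprecation warning, and returns the
# deprecated value so the caller can keep honoring it until removal.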
| 661 | 0 |
import sys
import turtle
def get_mid(pa, pb):
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2
def triangle(vertexa, vertexb, vertexc, depth):
    my_pen.up()
    my_pen.goto(vertexa[0], vertexa[1])
    my_pen.down()
    my_pen.goto(vertexb[0], vertexb[1])
    my_pen.goto(vertexc[0], vertexc[1])
    my_pen.goto(vertexa[0], vertexa[1])
    if depth == 0:
        return
    triangle(vertexa, get_mid(vertexa, vertexb), get_mid(vertexa, vertexc), depth - 1)
    triangle(vertexb, get_mid(vertexb, vertexa), get_mid(vertexb, vertexc), depth - 1)
    triangle(vertexc, get_mid(vertexc, vertexa), get_mid(vertexc, vertexb), depth - 1)
if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            '''Correct format for using this script: '''
            '''python fractals.py <int:depth_for_fractal>'''
        )
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor('''red''')
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
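# Assumed invocation (the depth value is illustrative): `python fractals.py 4` opens
# a turtle window and draws a depth-4 Sierpinski triangle inside the vertices above.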
| 718 |
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def A__ ( lowercase: int, lowercase: str ) -> Dict:
assert isinstance(lowercase, lowercase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory', [False, True] )
def A__ ( lowercase: Dict, lowercase: Tuple, lowercase: str ) -> str:
A : Any =tmp_path / 'cache'
A : Dict ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A : Dict =JsonDatasetReader(lowercase, cache_dir=lowercase, keep_in_memory=lowercase ).read()
_check_json_dataset(lowercase, lowercase )
@pytest.mark.parametrize(
'features', [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
], )
def A__ ( lowercase: Optional[int], lowercase: Any, lowercase: Union[str, Any] ) -> Tuple:
A : Tuple =tmp_path / 'cache'
A : Optional[Any] ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A : Optional[Any] =features.copy() if features else default_expected_features
A : Union[str, Any] =(
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
A : str =JsonDatasetReader(lowercase, features=lowercase, cache_dir=lowercase ).read()
_check_json_dataset(lowercase, lowercase )
@pytest.mark.parametrize(
'features', [
None,
{'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'},
], )
def A__ ( lowercase: Optional[int], lowercase: str, lowercase: Dict ) -> Optional[int]:
A : int =tmp_path / 'cache'
A : Tuple ={'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'}
A : int =features.copy() if features else default_expected_features
A : str =(
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
A : Optional[int] =JsonDatasetReader(lowercase, features=lowercase, cache_dir=lowercase ).read()
assert isinstance(lowercase, lowercase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def A__ ( lowercase: Optional[Any], lowercase: str ) -> Tuple:
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
A : str ={'col_2': 'int64', 'col_3': 'float64', 'col_1': 'string'}
A : Dict =features.copy()
A : List[str] =(
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
A : int =tmp_path / 'cache'
A : Optional[int] =JsonDatasetReader(lowercase, features=lowercase, cache_dir=lowercase ).read()
assert isinstance(lowercase, lowercase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('split', [None, NamedSplit('train' ), 'train', 'test'] )
def A__ ( lowercase: Union[str, Any], lowercase: Any, lowercase: str ) -> Optional[Any]:
A : Optional[int] =tmp_path / 'cache'
A : Optional[Any] ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A : str =JsonDatasetReader(lowercase, cache_dir=lowercase, split=lowercase ).read()
_check_json_dataset(lowercase, lowercase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type', [str, list] )
def A__ ( lowercase: Optional[Any], lowercase: int, lowercase: Union[str, Any] ) -> List[Any]:
if issubclass(lowercase, lowercase ):
A : int =jsonl_path
elif issubclass(lowercase, lowercase ):
A : Any =[jsonl_path]
A : Optional[Any] =tmp_path / 'cache'
A : Tuple ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A : List[str] =JsonDatasetReader(lowercase, cache_dir=lowercase ).read()
_check_json_dataset(lowercase, lowercase )
def A__ ( lowercase: List[str], lowercase: Tuple, lowercase: Optional[Any]=("train",) ) -> Tuple:
assert isinstance(lowercase, lowercase )
for split in splits:
A : List[str] =dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory', [False, True] )
def A__ ( lowercase: Tuple, lowercase: Optional[int], lowercase: Any ) -> str:
A : List[str] =tmp_path / 'cache'
A : Union[str, Any] ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A : str =JsonDatasetReader({'train': jsonl_path}, cache_dir=lowercase, keep_in_memory=lowercase ).read()
_check_json_datasetdict(lowercase, lowercase )
@pytest.mark.parametrize(
'features', [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
], )
def A__ ( lowercase: Optional[int], lowercase: Optional[int], lowercase: Optional[int] ) -> Tuple:
A : Any =tmp_path / 'cache'
A : List[str] ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A : str =features.copy() if features else default_expected_features
A : Dict =(
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
A : Optional[Any] =JsonDatasetReader({'train': jsonl_path}, features=lowercase, cache_dir=lowercase ).read()
_check_json_datasetdict(lowercase, lowercase )
@pytest.mark.parametrize('split', [None, NamedSplit('train' ), 'train', 'test'] )
def A__ ( lowercase: Any, lowercase: List[Any], lowercase: List[Any] ) -> Tuple:
if split:
A : Optional[int] ={split: jsonl_path}
else:
A : Dict ='train'
A : Optional[Any] ={'train': jsonl_path, 'test': jsonl_path}
A : Tuple =tmp_path / 'cache'
A : List[str] ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A : List[Any] =JsonDatasetReader(lowercase, cache_dir=lowercase ).read()
_check_json_datasetdict(lowercase, lowercase, splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def A__ ( lowercase: List[Any] ) -> Tuple:
return json.load(lowercase )
def A__ ( lowercase: List[Any] ) -> Tuple:
return [json.loads(lowercase ) for line in buffer]
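# Minimal reader sketch of what the tests above exercise (the file path and cache
# dir are illustrative assumptions):
# from datasets.io.json import JsonDatasetReader
# ds = JsonDatasetReader("data.jsonl", cache_dir="./cache").read()
# assert ds.column_names == ["col_1", "col_2", "col_3"]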
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Any:
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , lines=SCREAMING_SNAKE_CASE__ ).write()
buffer.seek(0 )
A : int =load_json_function(SCREAMING_SNAKE_CASE__ )
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert isinstance(exported_content[0] , SCREAMING_SNAKE_CASE__ )
assert len(SCREAMING_SNAKE_CASE__ ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any ) -> Optional[Any]:
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , lines=SCREAMING_SNAKE_CASE__ , orient=SCREAMING_SNAKE_CASE__ ).write()
buffer.seek(0 )
A : Any =load_json(SCREAMING_SNAKE_CASE__ )
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(SCREAMING_SNAKE_CASE__ , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(SCREAMING_SNAKE_CASE__ ) == 10
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[int]:
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , lines=SCREAMING_SNAKE_CASE__ , num_proc=2 ).write()
buffer.seek(0 )
A : int =load_json_function(SCREAMING_SNAKE_CASE__ )
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert isinstance(exported_content[0] , SCREAMING_SNAKE_CASE__ )
assert len(SCREAMING_SNAKE_CASE__ ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[Any]:
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , lines=SCREAMING_SNAKE_CASE__ , orient=SCREAMING_SNAKE_CASE__ , num_proc=2 ).write()
buffer.seek(0 )
A : List[Any] =load_json(SCREAMING_SNAKE_CASE__ )
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(SCREAMING_SNAKE_CASE__ , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(SCREAMING_SNAKE_CASE__ ) == 10
def SCREAMING_SNAKE_CASE_ ( self : str , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> List[Any]:
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , num_proc=0 )
@pytest.mark.parametrize('compression, extension' , [('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')] )
def SCREAMING_SNAKE_CASE_ ( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict ) -> str:
A : Union[str, Any] =tmp_path_factory.mktemp('data' ) / f'test.json.{extension}'
A : Union[str, Any] =str(shared_datadir / f'test_file.json.{extension}' )
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , compression=SCREAMING_SNAKE_CASE__ ).write()
with fsspec.open(SCREAMING_SNAKE_CASE__ , 'rb' , compression='infer' ) as f:
A : str =f.read()
with fsspec.open(SCREAMING_SNAKE_CASE__ , 'rb' , compression='infer' ) as f:
A : List[str] =f.read()
assert exported_content == original_content
| 661 | 0 |
import functools
def minimum_cost_tickets(days: list[int], costs: list[int]) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError('The parameter days should be a list of integers' )
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError('The parameter costs should be a list of three integers' )
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError('All days elements should be greater than 0' )
    if max(days) >= 366:
        raise ValueError('All days elements should be less than 366' )
    days_set = set(days)
    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1 )
        return min(
            costs[0] + dynamic_programming(index + 1 ), costs[1] + dynamic_programming(index + 7 ), costs[2] + dynamic_programming(index + 30 ), )
    return dynamic_programming(1 )
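# Worked example (classic instance, values assumed for illustration):
# minimum_cost_tickets(days=[1, 4, 6, 7, 8, 20], costs=[2, 7, 15]) returns 11 --
# a 1-day pass on day 1, a 7-day pass covering days 4-8, and a 1-day pass on day 20.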
if __name__ == "__main__":
import doctest
doctest.testmod()
| 719 |
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
lowercase : Optional[int] = DDIMPipeline
lowercase : int = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
lowercase : Optional[Any] = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"latents",
"callback",
"callback_steps",
}
lowercase : Optional[Any] = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
lowercase : Union[str, Any] = False
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Optional[int]:
torch.manual_seed(0 )
A : str =UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
A : Optional[int] =DDIMScheduler()
A : Optional[Any] ={'unet': unet, 'scheduler': scheduler}
return components
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any]=0 ) -> Any:
if str(SCREAMING_SNAKE_CASE__ ).startswith('mps' ):
A : List[Any] =torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
A : Union[str, Any] =torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
A : Optional[int] ={
'batch_size': 1,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> List[Any]:
A : Union[str, Any] ='cpu'
A : Tuple =self.get_dummy_components()
A : Union[str, Any] =self.pipeline_class(**SCREAMING_SNAKE_CASE__ )
pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
A : str =self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
A : str =pipe(**SCREAMING_SNAKE_CASE__ ).images
A : Optional[Any] =image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
A : Optional[Any] =np.array(
[1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04] )
A : str =np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(SCREAMING_SNAKE_CASE__ , 1e-3 )
def SCREAMING_SNAKE_CASE_ ( self : int ) -> Dict:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> List[Any]:
super().test_save_load_local(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Tuple:
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Tuple:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Dict:
A : Any ='google/ddpm-cifar10-32'
A : Optional[int] =UNetaDModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : Tuple =DDIMScheduler()
A : int =DDIMPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
ddim.to(SCREAMING_SNAKE_CASE__ )
ddim.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
A : Dict =torch.manual_seed(0 )
A : Optional[Any] =ddim(generator=SCREAMING_SNAKE_CASE__ , eta=0.0 , output_type='numpy' ).images
A : str =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A : Tuple =np.array([0.1_7_2_3, 0.1_6_1_7, 0.1_6_0_0, 0.1_6_2_6, 0.1_4_9_7, 0.1_5_1_3, 0.1_5_0_5, 0.1_4_4_2, 0.1_4_5_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> int:
A : Optional[int] ='google/ddpm-ema-bedroom-256'
A : str =UNetaDModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : str =DDIMScheduler.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : Tuple =DDIMPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
ddpm.to(SCREAMING_SNAKE_CASE__ )
ddpm.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
A : Any =torch.manual_seed(0 )
A : Optional[int] =ddpm(generator=SCREAMING_SNAKE_CASE__ , output_type='numpy' ).images
A : List[Any] =image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
A : Optional[int] =np.array([0.0_0_6_0, 0.0_2_0_1, 0.0_3_4_4, 0.0_0_2_4, 0.0_0_1_8, 0.0_0_0_2, 0.0_0_2_2, 0.0_0_0_0, 0.0_0_6_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
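# Hedged standalone sketch mirroring the first slow test above (the model id comes
# from the test; the de-obfuscated class name `UNet2DModel` is an assumption):
# unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
# pipe = DDIMPipeline(unet=unet, scheduler=DDIMScheduler()).to("cuda")
# image = pipe(generator=torch.manual_seed(0), eta=0.0, output_type="numpy").images[0]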
| 661 | 0 |
'''simple docstring'''
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def A__ ( ) -> Any:
A : List[str] =argparse.ArgumentParser()
parser.add_argument(
'-m', '--pretrained_model_name_or_path', type=lowercase_, default=lowercase_, required=lowercase_, help='Path to pretrained model or model identifier from huggingface.co/models.', )
parser.add_argument(
'-c', '--caption', type=lowercase_, default='robotic cat with wings', help='Text used to generate images.', )
parser.add_argument(
'-n', '--images_num', type=lowercase_, default=4, help='How much images to generate.', )
parser.add_argument(
'-s', '--seed', type=lowercase_, default=42, help='Seed for random process.', )
parser.add_argument(
'-ci', '--cuda_id', type=lowercase_, default=0, help='cuda_id.', )
A : Optional[int] =parser.parse_args()
return args
def A__ ( lowercase: Union[str, Any], lowercase: int, lowercase: Union[str, Any] ) -> Tuple:
if not len(lowercase_ ) == rows * cols:
raise ValueError('The specified number of rows and columns are not correct.' )
A : Union[str, Any] =imgs[0].size
A : Optional[int] =Image.new('RGB', size=(cols * w, rows * h) )
A : Tuple =grid.size
for i, img in enumerate(lowercase_ ):
grid.paste(lowercase_, box=(i % cols * w, i // cols * h) )
return grid
def A__ ( lowercase: str, lowercase: Optional[Any]="robotic cat with wings", lowercase: Tuple=7.5, lowercase: Dict=50, lowercase: Optional[Any]=1, lowercase: Dict=42, ) -> int:
A : List[str] =torch.Generator(pipeline.device ).manual_seed(lowercase_ )
A : int =pipeline(
lowercase_, guidance_scale=lowercase_, num_inference_steps=lowercase_, generator=lowercase_, num_images_per_prompt=lowercase_, ).images
A : Dict =int(math.sqrt(lowercase_ ) )
A : Optional[int] =image_grid(lowercase_, rows=_rows, cols=num_images_per_prompt // _rows )
return grid, images
_lowercase : List[str] =parse_args()
# Load models and create wrapper for stable diffusion
_lowercase : List[str] =CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='''tokenizer''')
_lowercase : List[str] =CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''text_encoder''')
_lowercase : Dict =AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='''vae''')
_lowercase : int =UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''unet''')
_lowercase : Dict =StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
_lowercase : str =lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, '''best_model.pt''')):
_lowercase : int =load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, '''unet''', unet)
else:
_lowercase : Union[str, Any] =unet.to(torch.device('''cuda''', args.cuda_id))
_lowercase : int =pipeline.to(unet.device)
_lowercase : List[str] =generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '''{}.png'''.format('''_'''.join(args.caption.split()))))
_lowercase : Optional[Any] =os.path.join(args.pretrained_model_name_or_path, '''_'''.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, '''{}.png'''.format(idx + 1)))
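# Assumed invocation (the script filename, paths, and caption are illustrative):
# python run_inc_inference.py -m ./quantized-sd-model -c "robotic cat with wings" -n 4 -s 42
# The script saves a grid image plus the individual samples under the model directory.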
| 720 |
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Union[str, Any]:
A : Dict =tempfile.mkdtemp()
A : int =SamImageProcessor()
A : Union[str, Any] =SamProcessor(SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[int]:
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ).image_processor
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Any:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Optional[int]:
A : str =[np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
A : Optional[int] =[Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Tuple:
A : Optional[int] =SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A : str =self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 )
A : Union[str, Any] =SamProcessor.from_pretrained(self.tmpdirname , do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Optional[int]:
A : Optional[Any] =self.get_image_processor()
A : Optional[Any] =SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
A : Dict =self.prepare_image_inputs()
A : Optional[int] =image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='np' )
A : Optional[Any] =processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='np' )
        input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : int ) -> Any:
A : str =self.get_image_processor()
A : Union[str, Any] =SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
A : str =[torch.ones((1, 3, 5, 5) )]
A : Optional[Any] =[[17_64, 26_46]]
A : List[Any] =[[6_83, 10_24]]
A : Union[str, Any] =processor.post_process_masks(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
A : Any =processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , torch.tensor(SCREAMING_SNAKE_CASE__ ) , torch.tensor(SCREAMING_SNAKE_CASE__ ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
A : str =[np.ones((1, 3, 5, 5) )]
A : int =processor.post_process_masks(SCREAMING_SNAKE_CASE__ , np.array(SCREAMING_SNAKE_CASE__ ) , np.array(SCREAMING_SNAKE_CASE__ ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
A : Any =[[1, 0], [0, 1]]
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
A : Any =processor.post_process_masks(SCREAMING_SNAKE_CASE__ , np.array(SCREAMING_SNAKE_CASE__ ) , np.array(SCREAMING_SNAKE_CASE__ ) )
@require_vision
@require_tf
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( self : str ) -> str:
A : Tuple =tempfile.mkdtemp()
A : Union[str, Any] =SamImageProcessor()
A : Union[str, Any] =SamProcessor(SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : int , **SCREAMING_SNAKE_CASE__ : str ) -> Union[str, Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ).image_processor
def SCREAMING_SNAKE_CASE_ ( self : str ) -> List[str]:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Tuple:
A : Optional[Any] =[np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
A : Any =[Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> List[str]:
A : Optional[Any] =SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A : Optional[Any] =self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 )
A : Dict =SamProcessor.from_pretrained(self.tmpdirname , do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Any:
A : Any =self.get_image_processor()
A : Any =SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
A : int =self.prepare_image_inputs()
A : Tuple =image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='np' )
A : List[Any] =processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_tf
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Tuple:
A : int =self.get_image_processor()
A : Any =SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
A : Optional[int] =[tf.ones((1, 3, 5, 5) )]
A : Tuple =[[17_64, 26_46]]
A : Union[str, Any] =[[6_83, 10_24]]
A : int =processor.post_process_masks(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_tensors='tf' )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
A : List[Any] =processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ ) , tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ ) , return_tensors='tf' , )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
A : Any =[np.ones((1, 3, 5, 5) )]
A : Optional[Any] =processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , np.array(SCREAMING_SNAKE_CASE__ ) , np.array(SCREAMING_SNAKE_CASE__ ) , return_tensors='tf' )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
A : Any =[[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
A : List[str] =processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , np.array(SCREAMING_SNAKE_CASE__ ) , np.array(SCREAMING_SNAKE_CASE__ ) , return_tensors='tf' )
@require_vision
@require_torchvision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Union[str, Any]:
A : Optional[int] =tempfile.mkdtemp()
A : Union[str, Any] =SamImageProcessor()
A : Dict =SamProcessor(SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : int , **SCREAMING_SNAKE_CASE__ : List[str] ) -> Any:
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ).image_processor
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Any:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Tuple:
A : Any =[np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
A : Tuple =[Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> List[str]:
A : Optional[Any] =self.get_image_processor()
A : Dict =SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
A : Optional[int] =np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
A : Optional[int] =[tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ )]
A : Union[str, Any] =[torch.tensor(SCREAMING_SNAKE_CASE__ )]
A : int =[[17_64, 26_46]]
A : int =[[6_83, 10_24]]
A : Dict =processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_tensors='tf' )
A : Optional[Any] =processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Any:
A : Union[str, Any] =self.get_image_processor()
A : int =SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
A : int =self.prepare_image_inputs()
A : List[Any] =image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='pt' )['pixel_values'].numpy()
A : Tuple =processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )['pixel_values'].numpy()
A : Optional[int] =image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='tf' )['pixel_values'].numpy()
A : Dict =processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='tf' )['pixel_values'].numpy()
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
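# Hedged end-to-end sketch (the checkpoint id and the `outputs` variable are
# assumptions, not taken from the tests above):
# processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
# inputs = processor(images=raw_image, return_tensors="pt")
# masks = processor.post_process_masks(
#     outputs.pred_masks, inputs["original_sizes"], inputs["reshaped_input_sizes"])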
| 661 | 0 |
from abc import ABC, abstractmethod
from typing import List, Optional
class SCREAMING_SNAKE_CASE_ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Dict ) -> Dict:
# test for the above condition
self.test()
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Optional[Any]:
A : Tuple =0
A : int =False
while not completed:
if counter == 1:
self.reset()
A : List[Any] =self.advance()
if not self.does_advance(_a ):
raise Exception(
'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.' )
A , A , A : Optional[int] =self.update(_a )
counter += 1
if counter > 1_00_00:
raise Exception('update() does not fulfill the constraint.' )
if self.remaining() != 0:
raise Exception('Custom Constraint is not defined correctly.' )
@abstractmethod
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Any:
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> List[Any]:
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def SCREAMING_SNAKE_CASE_ ( self : Dict , SCREAMING_SNAKE_CASE__ : Any ) -> Optional[int]:
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Dict:
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> List[str]:
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def SCREAMING_SNAKE_CASE_ ( self : Dict , SCREAMING_SNAKE_CASE__ : List[str]=False ) -> List[str]:
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
class SCREAMING_SNAKE_CASE_ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Any , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Optional[Any]:
super(_a , self ).__init__()
if not isinstance(_a , _a ) or len(_a ) == 0:
raise ValueError(f'`token_ids` has to be a non-empty list, but is {token_ids}.' )
if any((not isinstance(_a , _a ) or token_id < 0) for token_id in token_ids ):
raise ValueError(f'Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.' )
A : Union[str, Any] =token_ids
A : int =len(self.token_ids )
A : Union[str, Any] =-1 # the index of the currently fulfilled step
A : Union[str, Any] =False
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Optional[int]:
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def SCREAMING_SNAKE_CASE_ ( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Any:
if not isinstance(_a , _a ):
raise ValueError(f'`token_id` has to be an `int`, but is {token_id} of type {type(_a )}' )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def SCREAMING_SNAKE_CASE_ ( self : int , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Any:
if not isinstance(_a , _a ):
raise ValueError(f'`token_id` has to be an `int`, but is {token_id} of type {type(_a )}' )
A : Union[str, Any] =False
A : str =False
A : List[Any] =False
if self.does_advance(_a ):
self.fulfilled_idx += 1
A : int =True
if self.fulfilled_idx == (self.seqlen - 1):
A : str =True
A : Optional[int] =completed
else:
# failed to make progress.
A : Dict =True
self.reset()
return stepped, completed, reset
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> List[str]:
A : Optional[Any] =False
A : str =0
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> str:
return self.seqlen - (self.fulfilled_idx + 1)
def SCREAMING_SNAKE_CASE_ ( self : Dict , SCREAMING_SNAKE_CASE__ : str=False ) -> Optional[int]:
A : Tuple =PhrasalConstraint(self.token_ids )
if stateful:
A : Optional[Any] =self.seqlen
A : str =self.fulfilled_idx
A : List[Any] =self.completed
return new_constraint
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
def __init__( self : int , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]=True ) -> int:
A : List[Any] =max([len(_a ) for one in nested_token_ids] )
A : Any ={}
for token_ids in nested_token_ids:
A : int =root
for tidx, token_id in enumerate(_a ):
if token_id not in level:
A : int ={}
A : Dict =level[token_id]
if no_subsets and self.has_subsets(_a , _a ):
raise ValueError(
'Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'
f' {nested_token_ids}.' )
A : List[str] =root
def SCREAMING_SNAKE_CASE_ ( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Dict:
A : Tuple =self.trie
for current_token in current_seq:
A : str =start[current_token]
A : Union[str, Any] =list(start.keys() )
return next_tokens
def SCREAMING_SNAKE_CASE_ ( self : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Dict:
A : Optional[int] =self.next_tokens(_a )
return len(_a ) == 0
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> List[str]:
A : Tuple =list(root.values() )
if len(_a ) == 0:
return 1
else:
return sum([self.count_leaves(_a ) for nn in next_nodes] )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Union[str, Any]:
A : Optional[int] =self.count_leaves(_a )
return len(_a ) != leaf_count
class SCREAMING_SNAKE_CASE_ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict ) -> List[Any]:
super(_a , self ).__init__()
if not isinstance(_a , _a ) or len(_a ) == 0:
raise ValueError(f'`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.' )
if any(not isinstance(_a , _a ) for token_ids in nested_token_ids ):
raise ValueError(f'`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.' )
if any(
any((not isinstance(_a , _a ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
f'Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.' )
A : str =DisjunctiveTrie(_a )
A : str =nested_token_ids
A : Tuple =self.trie.max_height
A : List[str] =[]
A : int =False
def SCREAMING_SNAKE_CASE_ ( self : int ) -> str:
A : str =self.trie.next_tokens(self.current_seq )
if len(_a ) == 0:
return None
else:
return token_list
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[str]:
if not isinstance(_a , _a ):
raise ValueError(f'`token_id` is supposed to be type `int`, but is {token_id} of type {type(_a )}' )
A : Union[str, Any] =self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[Any]:
if not isinstance(_a , _a ):
raise ValueError(f'`token_id` is supposed to be type `int`, but is {token_id} of type {type(_a )}' )
A : List[str] =False
A : Dict =False
A : List[Any] =False
if self.does_advance(_a ):
self.current_seq.append(_a )
A : Tuple =True
else:
A : List[Any] =True
self.reset()
A : Optional[Any] =self.trie.reached_leaf(self.current_seq )
A : Any =completed
return stepped, completed, reset
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> List[Any]:
A : Union[str, Any] =False
A : Optional[int] =[]
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Optional[Any]:
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple=False ) -> Union[str, Any]:
A : List[str] =DisjunctiveConstraint(self.token_ids )
if stateful:
A : int =self.seqlen
A : str =self.current_seq
A : Union[str, Any] =self.completed
return new_constraint
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int ) -> Tuple:
A : Optional[int] =constraints
# max # of steps required to fulfill a given constraint
A : Tuple =max([c.seqlen for c in constraints] )
A : Any =len(_a )
A : Dict =False
self.init_state()
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> str:
A : List[Any] =[]
A : Union[str, Any] =None
A : Union[str, Any] =[constraint.copy(stateful=_a ) for constraint in self.constraints]
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> List[str]:
A : str =0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Optional[int]:
A : Optional[int] =[]
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
A : Optional[int] =constraint.advance()
if isinstance(_a , _a ):
token_list.append(_a )
elif isinstance(_a , _a ):
token_list.extend(_a )
else:
A : Dict =self.inprogress_constraint.advance()
if isinstance(_a , _a ):
token_list.append(_a )
elif isinstance(_a , _a ):
token_list.extend(_a )
if len(_a ) == 0:
return None
else:
return token_list
def SCREAMING_SNAKE_CASE_ ( self : Dict , SCREAMING_SNAKE_CASE__ : int ) -> List[str]:
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
A , A : Optional[int] =self.add(_a )
# the entire list of constraints are fulfilled
if self.completed:
break
def SCREAMING_SNAKE_CASE_ ( self : str , SCREAMING_SNAKE_CASE__ : Dict ) -> List[str]:
if not isinstance(_a , _a ):
raise ValueError(f'`token_id` should be an `int`, but is `{token_id}`.' )
A , A : Optional[int] =False, False
if self.completed:
A : str =True
A : List[str] =False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
# job, simply update the state
A , A , A : List[Any] =self.inprogress_constraint.update(_a )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=_a ) )
A : Any =None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
A : str =None
if len(self.pending_constraints ) == 0:
# we're done!
A : List[Any] =True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(_a ):
A , A , A : str =pending_constraint.update(_a )
if not stepped:
raise Exception(
'`constraint.update(token_id)` is not yielding incremental progress, '
'even though `constraint.does_advance(token_id)` is true.' )
if complete:
self.complete_constraints.append(_a )
A : Optional[int] =None
if not complete and stepped:
A : Any =pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
A : Optional[int] =(
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
A : Optional[int] =True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def SCREAMING_SNAKE_CASE_ ( self : int , SCREAMING_SNAKE_CASE__ : Dict=True ) -> Optional[Any]:
A : List[Any] =ConstraintListState(self.constraints ) # we actually never though self.constraints objects
# throughout this process. So it's at initialization state.
if stateful:
A : Tuple =[
constraint.copy(stateful=_a ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
A : str =self.inprogress_constraint.copy(stateful=_a )
A : str =[constraint.copy() for constraint in self.pending_constraints]
return new_state
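# Hedged usage sketch (the generate() wiring follows the public transformers
# constrained-decoding API; the model, tokenizer, and phrase are assumptions):
# force_ids = tokenizer("scared of", add_special_tokens=False).input_ids
# constraint = PhrasalConstraint(force_ids)
# output = model.generate(input_ids, constraints=[constraint], num_beams=5)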
| 721 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
_lowercase : Optional[Any] =WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN'''])
def A__ ( lowercase: Optional[int] ) -> Optional[int]:
A : str =test_results.split(' ' )
A : List[str] =0
A : Tuple =0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
A : List[str] =expressions[-2] if '=' in expressions[-1] else expressions[-1]
for i, expression in enumerate(lowercase ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
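# Worked example of the stats parser above (the input string is illustrative):
# "== 2 failed, 98 passed in 3:45:12 ==" -> failed=2, success=98, time_spent="3:45:12"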
def A__ ( lowercase: List[Any] ) -> str:
A : Union[str, Any] ={}
A : Optional[Any] =None
A : Union[str, Any] =False
for line in failures_short_lines.split('\n' ):
if re.search(r'_ \[doctest\]', lowercase ):
A : List[Any] =True
A : Any =line.split(' ' )[2]
elif in_error and not line.split(' ' )[0].isdigit():
A : Dict =line
A : List[str] =False
return failures
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
def __init__( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict ) -> List[str]:
A : Tuple =title
A : Dict =doc_test_results['time_spent'].split(',' )[0]
A : Union[str, Any] =doc_test_results['success']
A : Any =doc_test_results['failures']
A : Optional[Any] =self.n_success + self.n_failures
# Failures and success of the modeling tests
A : Union[str, Any] =doc_test_results
@property
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> str:
A : Any =[self._time_spent]
A : List[str] =0
for time in time_spent:
A : List[Any] =time.split(':' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(SCREAMING_SNAKE_CASE__ ) == 1:
A : List[str] =[0, 0, time_parts[0]]
A , A , A : Tuple =int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 36_00 + minutes * 60 + seconds
A , A , A : str =total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
return f'{int(SCREAMING_SNAKE_CASE__ )}h{int(SCREAMING_SNAKE_CASE__ )}m{int(SCREAMING_SNAKE_CASE__ )}s'
@property
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Dict:
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
f' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Dict:
A : Tuple =40
A : Optional[Any] ={k: v['failed'] for k, v in doc_test_results.items() if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}
A : Any =''
for category, failures in category_failures.items():
if len(SCREAMING_SNAKE_CASE__ ) == 0:
continue
if report != "":
report += "\n\n"
report += f'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(SCREAMING_SNAKE_CASE__ )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'The following examples had failures:\n\n\n{report}\n',
},
}
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> str:
A : Optional[int] =[self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(SCREAMING_SNAKE_CASE__ )
@staticmethod
def SCREAMING_SNAKE_CASE_ ( ) -> Optional[Any]:
A : Tuple =[
{
'type': 'section',
'text': {
'type': 'plain_text',
'text': 'There was an issue running the tests.',
},
'accessory': {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True},
'url': f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
]
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(SCREAMING_SNAKE_CASE__ )} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text='There was an issue running the tests.' , blocks=SCREAMING_SNAKE_CASE__ , )
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Optional[int]:
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(self.payload )} ) )
A : Any =f'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else 'All tests passed.'
A : Dict =client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , blocks=self.payload , text=SCREAMING_SNAKE_CASE__ , )
def SCREAMING_SNAKE_CASE_ ( self : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[int]:
A : List[str] =''
for key, value in failures.items():
A : Any =value[:2_00] + ' [Truncated]' if len(SCREAMING_SNAKE_CASE__ ) > 2_50 else value
failures_text += f'*{key}*\n_{value}_\n\n'
A : Union[str, Any] =job_name
A : Any ={'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}}
if job_link is not None:
A : int ={
'type': 'button',
'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True},
'url': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> List[Any]:
if self.thread_ts is None:
raise ValueError('Can only post reply if a post has been made.' )
A : Union[str, Any] =self.doc_test_results.pop('job_link' )
self.doc_test_results.pop('failures' )
self.doc_test_results.pop('success' )
self.doc_test_results.pop('time_spent' )
A : Union[str, Any] =sorted(self.doc_test_results.items() , key=lambda SCREAMING_SNAKE_CASE__ : t[0] )
for job, job_result in sorted_dict:
if len(job_result['failures'] ):
A : Any =f'*Num failures* :{len(job_result["failed"] )} \n'
A : List[Any] =job_result['failures']
A : Any =self.get_reply_blocks(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , text=SCREAMING_SNAKE_CASE__ )
print('Sending the following reply' )
print(json.dumps({'blocks': blocks} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text=f'Results for {job}' , blocks=SCREAMING_SNAKE_CASE__ , thread_ts=self.thread_ts['ts'] , )
time.sleep(1 )
def A__ ( ) -> Union[str, Any]:
A : Any =os.environ['GITHUB_RUN_ID']
A : List[Any] =F'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'
A : Union[str, Any] =requests.get(lowercase ).json()
A : List[Any] ={}
try:
jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
A : List[str] =math.ceil((result['total_count'] - 100) / 100 )
for i in range(lowercase ):
A : List[str] =requests.get(url + F'&page={i + 2}' ).json()
jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
return jobs
except Exception as e:
print('Unknown error, could not fetch links.', lowercase )
return {}
def A__ ( lowercase: str ) -> Optional[Any]:
A : Any ={}
if os.path.exists(lowercase ):
A : List[Any] =os.listdir(lowercase )
for file in files:
try:
with open(os.path.join(lowercase, lowercase ), encoding='utf-8' ) as f:
A : Optional[int] =f.read()
except UnicodeDecodeError as e:
raise ValueError(F'Could not open {os.path.join(lowercase, lowercase )}.' ) from e
return _artifact
def A__ ( ) -> int:
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : str ) -> List[str]:
A : Dict =name
A : Dict =[]
def __str__( self : Optional[Any] ) -> List[str]:
return self.name
def SCREAMING_SNAKE_CASE_ ( self : int , SCREAMING_SNAKE_CASE__ : str ) -> List[Any]:
self.paths.append({'name': self.name, 'path': path} )
A : Dict[str, Artifact] ={}
A : str =filter(os.path.isdir, os.listdir() )
for directory in directories:
A : Tuple =directory
if artifact_name not in _available_artifacts:
A : int =Artifact(lowercase )
_available_artifacts[artifact_name].add_path(lowercase )
return _available_artifacts
if __name__ == "__main__":
_lowercase : Optional[int] =get_job_links()
_lowercase : str =retrieve_available_artifacts()
_lowercase : List[Any] =collections.OrderedDict(
[
('''*.py''', '''API Examples'''),
('''*.md''', '''MD Examples'''),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
_lowercase : Optional[Any] ={
v: {
'''failed''': [],
'''failures''': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
_lowercase : List[Any] =github_actions_job_links.get('''run_doctests''')
_lowercase : int =available_artifacts['''doc_tests_gpu_test_reports'''].paths[0]
_lowercase : Dict =retrieve_artifact(artifact_path['''name'''])
if "stats" in artifact:
_lowercase , _lowercase , _lowercase : List[Any] =handle_test_results(artifact['''stats'''])
_lowercase : Any =failed
_lowercase : Union[str, Any] =success
_lowercase : str =time_spent[1:-1] + ''', '''
_lowercase : Any =extract_first_line_failure(artifact['''failures_short'''])
for line in artifact["summary_short"].split('''\n'''):
if re.search('''FAILED''', line):
_lowercase : Tuple =line.replace('''FAILED ''', '''''')
_lowercase : int =line.split()[0].replace('''\n''', '''''')
if "::" in line:
_lowercase , _lowercase : str =line.split('''::''')
else:
_lowercase , _lowercase : Union[str, Any] =line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
_lowercase : Any =docs[file_regex]
doc_test_results[category]["failed"].append(test)
_lowercase : Any =all_failures[test] if test in all_failures else '''N/A'''
_lowercase : Tuple =failure
break
_lowercase : Optional[int] =Message('''🤗 Results of the doc tests.''', doc_test_results)
message.post()
message.post_reply()
| 661 | 0 |
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
_lowercase : Union[str, Any] =logging.get_logger(__name__) # pylint: disable=invalid-name
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int=7_68 ) -> List[Any]:
super().__init__(UpperCamelCase__ )
A : Any =proj_size
A : str =CLIPVisionModel(UpperCamelCase__ )
A : List[str] =PaintByExampleMapper(UpperCamelCase__ )
A : Any =nn.LayerNorm(config.hidden_size )
A : Tuple =nn.Linear(config.hidden_size , self.proj_size )
        # unconditioned embedding used for guidance scaling
A : List[Any] =nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int]=False ) -> int:
A : Optional[Any] =self.model(pixel_values=UpperCamelCase__ )
A : Optional[int] =clip_output.pooler_output
A : List[Any] =self.mapper(latent_states[:, None] )
A : Union[str, Any] =self.final_layer_norm(UpperCamelCase__ )
A : List[str] =self.proj_out(UpperCamelCase__ )
if return_uncond_vector:
return latent_states, self.uncond_vector
return latent_states
class SCREAMING_SNAKE_CASE_ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict ) -> Tuple:
super().__init__()
A : int =(config.num_hidden_layers + 1) // 5
A : Dict =config.hidden_size
A : Optional[int] =1
A : int =nn.ModuleList(
[
BasicTransformerBlock(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , activation_fn='gelu' , attention_bias=UpperCamelCase__ )
for _ in range(UpperCamelCase__ )
] )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ) -> int:
for block in self.blocks:
A : Optional[Any] =block(UpperCamelCase__ )
return hidden_states
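# Hedged shape sketch (names/dims assumed from the defaults above, e.g. a CLIP
# vision backbone with hidden_size H and proj_size 768):
# latent_states (batch, 1, H) -> self-attention blocks -> (batch, 1, H),
# then the encoder's LayerNorm + Linear projects to (batch, 1, 768).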
| 700 |
_lowercase : Dict ='''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
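# Hedged usage sketch of the public API re-exported above (`model`/`optimizer`
# are placeholders the caller must define):
# from accelerate import Accelerator
# accelerator = Accelerator()
# model, optimizer = accelerator.prepare(model, optimizer)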
| 661 | 0 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def A__ ( lowercase: Any, lowercase: Optional[Any], lowercase: str ) -> Union[str, Any]:
A : Any =('dense.weight', 'attention.self.query', 'attention.self.key', 'attention.self.value')
A : Dict =(
('layer.', 'layer_'),
('word_embeddings.weight', 'word_embeddings'),
('position_embeddings.weight', 'position_embeddings'),
('token_type_embeddings.weight', 'token_type_embeddings'),
('.', '/'),
('LayerNorm/weight', 'LayerNorm/gamma'),
('LayerNorm/bias', 'LayerNorm/beta'),
('weight', 'kernel'),
)
if not os.path.isdir(lowercase ):
os.makedirs(lowercase )
A : Union[str, Any] =model.state_dict()
def to_tf_var_name(lowercase: Dict ):
for patt, repl in iter(lowercase ):
A : str =name.replace(lowercase, lowercase )
return F'bert/{name}'
def create_tf_var(lowercase: str, lowercase: str, lowercase: Any ):
A : str =tf.dtypes.as_dtype(tensor.dtype )
A : int =tf.get_variable(dtype=lowercase, shape=tensor.shape, name=lowercase, initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(lowercase )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
A : Union[str, Any] =to_tf_var_name(lowercase )
A : List[Any] =state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
A : Union[str, Any] =torch_tensor.T
A : str =create_tf_var(tensor=lowercase, name=lowercase, session=lowercase )
tf.keras.backend.set_value(lowercase, lowercase )
A : Optional[Any] =session.run(lowercase )
print(F'Successfully created {tf_name}: {np.allclose(lowercase, lowercase )}' )
A : Dict =tf.train.Saver(tf.trainable_variables() )
saver.save(lowercase, os.path.join(lowercase, model_name.replace('-', '_' ) + '.ckpt' ) )
def A__ ( lowercase: Dict=None ) -> Tuple:
A : int =argparse.ArgumentParser()
parser.add_argument('--model_name', type=lowercase, required=lowercase, help='model name e.g. bert-base-uncased' )
parser.add_argument(
'--cache_dir', type=lowercase, default=lowercase, required=lowercase, help='Directory containing pytorch model' )
parser.add_argument('--pytorch_model_path', type=lowercase, required=lowercase, help='/path/to/<pytorch-model-name>.bin' )
parser.add_argument('--tf_cache_dir', type=lowercase, required=lowercase, help='Directory in which to save tensorflow model' )
A : Any =parser.parse_args(lowercase )
A : Union[str, Any] =BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name, state_dict=torch.load(args.pytorch_model_path ), cache_dir=args.cache_dir, )
convert_pytorch_checkpoint_to_tf(model=lowercase, ckpt_dir=args.tf_cache_dir, model_name=args.model_name )
if __name__ == "__main__":
main()
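# Hedged CLI sketch (the script filename and paths are placeholders):
# python convert_bert_pytorch_checkpoint_to_tf.py \
#     --model_name bert-base-uncased \
#     --pytorch_model_path ./pytorch_model.bin \
#     --tf_cache_dir ./tf_ckpt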
| 701 |
from typing import List
from .keymap import KEYMAP, get_character
def A__ ( lowercase: str ) -> List[str]:
def decorator(lowercase: int ):
A : Tuple =getattr(lowercase, 'handle_key', [] )
handle += [key]
setattr(lowercase, 'handle_key', lowercase )
return func
return decorator
def A__ ( *lowercase: List[str] ) -> Dict:
def decorator(lowercase: Union[str, Any] ):
A : Optional[int] =getattr(lowercase, 'handle_key', [] )
handle += keys
setattr(lowercase, 'handle_key', lowercase )
return func
return decorator
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
def __new__( cls : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> Any:
A : Dict =super().__new__(cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if not hasattr(SCREAMING_SNAKE_CASE__ , 'key_handler' ):
setattr(SCREAMING_SNAKE_CASE__ , 'key_handler' , {} )
setattr(SCREAMING_SNAKE_CASE__ , 'handle_input' , KeyHandler.handle_input )
for value in attrs.values():
A : Optional[Any] =getattr(SCREAMING_SNAKE_CASE__ , 'handle_key' , [] )
for key in handled_keys:
A : str =value
return new_cls
@staticmethod
def SCREAMING_SNAKE_CASE_ ( cls : str ) -> Any:
A : str =get_character()
if char != KEYMAP["undefined"]:
A : List[str] =ord(SCREAMING_SNAKE_CASE__ )
A : List[str] =cls.key_handler.get(SCREAMING_SNAKE_CASE__ )
if handler:
A : List[str] =char
return handler(cls )
else:
return None
def A__ ( cls: Optional[int] ) -> str:
return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy() )
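# Hedged usage sketch (`mark` and `register` are assumed to be the original
# names of the first decorator and the final factory defined above):
# class Menu:
#     @mark('up')
#     def move_up(cls):
#         return 'moved up'
# Menu = register(Menu)  # rebuilds the class through the KeyHandler metaclass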
| 661 | 0 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
_lowercase : str =logging.get_logger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase ):
'''simple docstring'''
lowercase : List[str] = [
"no_inference",
"no_cuda",
"no_tpu",
"no_speed",
"no_memory",
"no_env_print",
"no_multi_process",
]
def __init__( self : Optional[int] , **SCREAMING_SNAKE_CASE__ : List[str] ) -> int:
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
A : Optional[int] =deprecated_arg[3:]
A : Union[str, Any] =not kwargs.pop(lowercase__ )
logger.warning(
                    f'{deprecated_arg} is deprecated. Please use --no-{positive_arg} or'
f' {positive_arg}={kwargs[positive_arg]}' )
A : List[Any] =kwargs.pop('tpu_name' , self.tpu_name )
A : Optional[int] =kwargs.pop('device_idx' , self.device_idx )
A : int =kwargs.pop('eager_mode' , self.eager_mode )
A : str =kwargs.pop('use_xla' , self.use_xla )
super().__init__(**lowercase__ )
lowercase : Any = field(
default=_UpperCAmelCase , metadata={"help": "Name of TPU"} , )
lowercase : Dict = field(
default=0 , metadata={"help": "CPU / GPU device index. Defaults to 0."} , )
lowercase : List[Any] = field(default=_UpperCAmelCase , metadata={"help": "Benchmark models in eager model."} )
lowercase : int = field(
default=_UpperCAmelCase , metadata={
"help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
} , )
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Any:
requires_backends(self , ['tf'] )
A : Optional[Any] =None
if self.tpu:
try:
if self.tpu_name:
A : Optional[int] =tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
A : Union[str, Any] =tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
A : str =None
return tpu
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Dict:
requires_backends(self , ['tf'] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
A : Any =tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , 'GPU' )
A : Dict =tf.distribute.OneDeviceStrategy(device=f'/gpu:{self.device_idx}' )
else:
tf.config.set_visible_devices([] , 'GPU' ) # disable GPU
A : Optional[Any] =tf.distribute.OneDeviceStrategy(device=f'/cpu:{self.device_idx}' )
return strategy
@property
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Optional[Any]:
requires_backends(self , ['tf'] )
return self._setup_tpu is not None
@property
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Any:
requires_backends(self , ['tf'] )
return self._setup_strategy
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> List[str]:
requires_backends(self , ['tf'] )
return tf.config.list_physical_devices('GPU' )
@property
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Optional[int]:
requires_backends(self , ['tf'] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> str:
return self.n_gpu > 0
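# Hedged usage sketch (`TensorFlowBenchmarkArguments` is the assumed original
# class name; `models` comes from the parent BenchmarkArguments):
# args = TensorFlowBenchmarkArguments(models=['bert-base-uncased'], eager_mode=True, use_xla=False)
# print(args.n_gpu, args.is_gpu)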
| 702 |
import math
def A__ ( lowercase: int ) -> list:
A : Optional[Any] =[True] * n
A : Tuple =False
A : List[Any] =False
A : Dict =True
for i in range(3, int(n**0.5 + 1 ), 2 ):
A : Dict =i * 2
while index < n:
A : Dict =False
A : Dict =index + i
A : Tuple =[2]
for i in range(3, lowercase, 2 ):
if is_prime[i]:
primes.append(lowercase )
return primes
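# Sanity check for the sieve above:
# prime_sieve(10) -> [2, 3, 5, 7]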
def A__ ( lowercase: int = 999_966_663_333 ) -> int:
A : Optional[int] =math.floor(math.sqrt(lowercase ) ) + 100
A : Optional[int] =prime_sieve(lowercase )
A : Optional[Any] =0
A : List[Any] =0
A : Union[str, Any] =primes[prime_index]
while (last_prime**2) <= limit:
A : Tuple =primes[prime_index + 1]
A : Optional[int] =last_prime**2
A : Tuple =next_prime**2
# Get numbers divisible by lps(current)
A : int =lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
A : List[Any] =upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
A : Any =0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
A : List[str] =next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
| 661 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
_lowercase : Union[str, Any] =logging.get_logger(__name__)
_lowercase : Union[str, Any] ={
'''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'''
),
'''microsoft/deberta-v2-xxlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
lowercase : Union[str, Any] = "deberta-v2"
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : Any=12_81_00 , SCREAMING_SNAKE_CASE__ : List[Any]=15_36 , SCREAMING_SNAKE_CASE__ : List[str]=24 , SCREAMING_SNAKE_CASE__ : List[Any]=24 , SCREAMING_SNAKE_CASE__ : int=61_44 , SCREAMING_SNAKE_CASE__ : List[Any]="gelu" , SCREAMING_SNAKE_CASE__ : int=0.1 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE__ : Dict=5_12 , SCREAMING_SNAKE_CASE__ : Optional[int]=0 , SCREAMING_SNAKE_CASE__ : Tuple=0.0_2 , SCREAMING_SNAKE_CASE__ : Optional[int]=1e-7 , SCREAMING_SNAKE_CASE__ : Optional[int]=False , SCREAMING_SNAKE_CASE__ : Tuple=-1 , SCREAMING_SNAKE_CASE__ : Dict=0 , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=0 , SCREAMING_SNAKE_CASE__ : Optional[int]="gelu" , **SCREAMING_SNAKE_CASE__ : Tuple , ) -> Any:
super().__init__(**SCREAMING_SNAKE_CASE__ )
A : int =hidden_size
A : List[str] =num_hidden_layers
A : List[Any] =num_attention_heads
A : Any =intermediate_size
A : str =hidden_act
A : List[Any] =hidden_dropout_prob
A : List[str] =attention_probs_dropout_prob
A : List[str] =max_position_embeddings
A : Union[str, Any] =type_vocab_size
A : Optional[Any] =initializer_range
A : List[Any] =relative_attention
A : Optional[Any] =max_relative_positions
A : Union[str, Any] =pad_token_id
A : str =position_biased_input
# Backwards compatibility
if type(SCREAMING_SNAKE_CASE__ ) == str:
A : Optional[Any] =[x.strip() for x in pos_att_type.lower().split('|' )]
A : Dict =pos_att_type
A : Tuple =vocab_size
A : Dict =layer_norm_eps
A : str =kwargs.get('pooler_hidden_size' , SCREAMING_SNAKE_CASE__ )
A : Any =pooler_dropout
A : Dict =pooler_hidden_act
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
@property
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> List[str]:
if self.task == "multiple-choice":
A : Optional[Any] ={0: 'batch', 1: 'choice', 2: 'sequence'}
else:
A : int ={0: 'batch', 1: 'sequence'}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)] )
else:
return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)] )
@property
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Dict:
return 12
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : Optional["TensorType"] = None , SCREAMING_SNAKE_CASE__ : int = 3 , SCREAMING_SNAKE_CASE__ : int = 40 , SCREAMING_SNAKE_CASE__ : int = 40 , SCREAMING_SNAKE_CASE__ : "PreTrainedTokenizerBase" = None , ) -> Optional[Any]:
A : List[str] =super().generate_dummy_inputs(preprocessor=SCREAMING_SNAKE_CASE__ , framework=SCREAMING_SNAKE_CASE__ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
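# Hedged usage sketch (`DebertaV2Config` is the assumed original class name;
# the defaults above match the deberta-v2-xlarge sizes):
# config = DebertaV2Config()  # vocab_size=128100, hidden_size=1536, num_hidden_layers=24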
| 703 |
import heapq
def A__ ( lowercase: dict ) -> set[int]:
A : list[list] =[]
# for each node and his adjacency list add them and the rank of the node to queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(lowercase, [-1 * len(lowercase ), (key, value)] )
# chosen_vertices = set of chosen vertices
A : Dict =set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
A : List[str] =heapq.heappop(lowercase )[1][0]
chosen_vertices.add(lowercase )
# Remove all arcs adjacent to argmax
for elem in queue:
        # if v has no adjacent nodes, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
A : str =elem[1][1].index(lowercase )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(lowercase )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowercase : List[Any] ={0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
| 661 | 0 |
_lowercase : List[Any] =range(2, 2_0 + 1)
_lowercase : List[Any] =[1_0**k for k in range(ks[-1] + 1)]
_lowercase : Dict ={}
def A__ ( lowercase: List[Any], lowercase: Union[str, Any], lowercase: int, lowercase: str ) -> Dict:
A : Tuple =sum(a_i[j] for j in range(__lowercase, len(__lowercase ) ) )
A : Any =sum(a_i[j] * base[j] for j in range(min(len(__lowercase ), __lowercase ) ) )
A : str =0, 0
A : List[Any] =n - i
A : Tuple =memo.get(__lowercase )
if sub_memo is not None:
A : Dict =sub_memo.get(__lowercase )
if jumps is not None and len(__lowercase ) > 0:
# find and make the largest jump without going over
A : Optional[Any] =-1
for _k in range(len(__lowercase ) - 1, -1, -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
A : List[Any] =_k
break
if max_jump >= 0:
A : List[Any] =jumps[max_jump]
# since the difference between jumps is cached, add c
A : int =diff + c
for j in range(min(__lowercase, len(__lowercase ) ) ):
A : Optional[Any] =divmod(__lowercase, 10 )
if new_c > 0:
add(__lowercase, __lowercase, __lowercase )
else:
A : List[str] =[]
else:
A : Optional[int] ={c: []}
A : List[str] =sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
A : int =next_term(__lowercase, k - 1, i + dn, __lowercase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
A : Optional[int] =compute(__lowercase, __lowercase, i + dn, __lowercase )
diff += _diff
dn += terms_jumped
A : Any =sub_memo[c]
# keep jumps sorted by # of terms skipped
A : Union[str, Any] =0
while j < len(__lowercase ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(__lowercase, (diff, dn, k) )
return (diff, dn)
def A__ ( lowercase: int, lowercase: int, lowercase: List[Any], lowercase: int ) -> Optional[int]:
if i >= n:
return 0, i
if k > len(__lowercase ):
a_i.extend([0 for _ in range(k - len(__lowercase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
A : int =i
A : Union[str, Any] =0, 0, 0
for j in range(len(__lowercase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
A : Tuple =ds_c + ds_b
diff += addend
A : List[Any] =0
for j in range(__lowercase ):
A : Optional[Any] =a_i[j] + addend
A : Dict =divmod(__lowercase, 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(__lowercase, __lowercase, __lowercase )
return diff, i - start_i
def A__ ( lowercase: Optional[Any], lowercase: int, lowercase: Any ) -> Optional[Any]:
for j in range(__lowercase, len(__lowercase ) ):
A : Optional[int] =digits[j] + addend
if s >= 10:
A : Union[str, Any] =divmod(__lowercase, 10 )
A : List[Any] =addend // 10 + quotient
else:
A : Union[str, Any] =s
A : Optional[Any] =addend // 10
if addend == 0:
break
while addend > 0:
A : Dict =divmod(__lowercase, 10 )
digits.append(__lowercase )
def A__ ( lowercase: int = 10**15 ) -> int:
A : Optional[Any] =[1]
A : Union[str, Any] =1
A : Any =0
while True:
A : Dict =next_term(__lowercase, 20, i + dn, __lowercase )
dn += terms_jumped
if dn == n - i:
break
A : Any =0
for j in range(len(__lowercase ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(f'''{solution() = }''')
| 704 |
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
_lowercase : List[Any] =logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : float , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> int:
A : Tuple =feature_size
A : int =sampling_rate
A : List[str] =padding_value
A : Tuple =kwargs.pop('padding_side' , 'right' )
A : str =kwargs.pop('return_attention_mask' , SCREAMING_SNAKE_CASE__ )
super().__init__(**SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : str , SCREAMING_SNAKE_CASE__ : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , SCREAMING_SNAKE_CASE__ : Union[bool, str, PaddingStrategy] = True , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[str, TensorType]] = None , ) -> BatchFeature:
        # If we have a list of dicts, let's convert it to a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(SCREAMING_SNAKE_CASE__ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
A : Tuple ={
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
        # The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
'You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'
f' to this method that includes {self.model_input_names[0]}, but you provided'
f' {list(processed_features.keys() )}' )
A : Dict =processed_features[self.model_input_names[0]]
A : int =(
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(SCREAMING_SNAKE_CASE__ ) == 0:
if return_attention_mask:
A : List[Any] =[]
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
A : List[str] =required_input[0]
if isinstance(SCREAMING_SNAKE_CASE__ , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
A : Any =0
while len(required_input[index] ) == 0:
index += 1
if index < len(SCREAMING_SNAKE_CASE__ ):
A : Dict =required_input[index][0]
if return_tensors is None:
if is_tf_tensor(SCREAMING_SNAKE_CASE__ ):
A : List[Any] ='tf'
elif is_torch_tensor(SCREAMING_SNAKE_CASE__ ):
A : Optional[int] ='pt'
elif isinstance(SCREAMING_SNAKE_CASE__ , (int, float, list, tuple, np.ndarray) ):
A : Union[str, Any] ='np'
else:
raise ValueError(
f'type of {first_element} unknown: {type(SCREAMING_SNAKE_CASE__ )}. '
'Should be one of a python, numpy, pytorch or tensorflow object.' )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
A : int =to_numpy(SCREAMING_SNAKE_CASE__ )
else:
A : List[Any] =[to_numpy(SCREAMING_SNAKE_CASE__ ) for v in value]
# Convert padding_strategy in PaddingStrategy
A : List[Any] =self._get_padding_strategies(padding=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ )
A : Optional[int] =processed_features[self.model_input_names[0]]
A : List[str] =len(SCREAMING_SNAKE_CASE__ )
if not all(len(SCREAMING_SNAKE_CASE__ ) == batch_size for v in processed_features.values() ):
raise ValueError('Some items in the output dictionary have a different batch size than others.' )
A : Tuple =[]
for i in range(SCREAMING_SNAKE_CASE__ ):
A : int ={k: v[i] for k, v in processed_features.items()}
# truncation
A : List[Any] =self._truncate(
SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , pad_to_multiple_of=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , )
truncated_inputs.append(SCREAMING_SNAKE_CASE__ )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
A : Any =max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
A : Optional[Any] =PaddingStrategy.MAX_LENGTH
A : List[Any] ={}
for i in range(SCREAMING_SNAKE_CASE__ ):
# padding
A : Optional[Any] =self._pad(
truncated_inputs[i] , max_length=SCREAMING_SNAKE_CASE__ , padding_strategy=SCREAMING_SNAKE_CASE__ , pad_to_multiple_of=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , )
for key, value in outputs.items():
if key not in batch_outputs:
A : Dict =[]
if value.dtype is np.dtype(np.floataa ):
A : Tuple =value.astype(np.floataa )
batch_outputs[key].append(SCREAMING_SNAKE_CASE__ )
return BatchFeature(SCREAMING_SNAKE_CASE__ , tensor_type=SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : Union[Dict[str, np.ndarray], BatchFeature] , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , ) -> dict:
A : Optional[int] =processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
A : List[str] =len(SCREAMING_SNAKE_CASE__ )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
A : Tuple =((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
A : int =padding_strategy != PaddingStrategy.DO_NOT_PAD and len(SCREAMING_SNAKE_CASE__ ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
A : str =np.ones(len(SCREAMING_SNAKE_CASE__ ) , dtype=np.intaa )
if needs_to_be_padded:
A : Union[str, Any] =max_length - len(SCREAMING_SNAKE_CASE__ )
if self.padding_side == "right":
if return_attention_mask:
A : Dict =np.pad(
processed_features['attention_mask'] , (0, difference) )
A : str =((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
A : Tuple =np.pad(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'constant' , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
A : List[Any] =np.pad(
processed_features['attention_mask'] , (difference, 0) )
A : Union[str, Any] =((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
A : Tuple =np.pad(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'constant' , constant_values=self.padding_value )
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return processed_features
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : Union[Dict[str, np.ndarray], BatchFeature] , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , ) -> Optional[Any]:
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError('When setting ``truncation=True``, make sure that ``max_length`` is defined.' )
A : Tuple =processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
A : Any =((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
A : List[str] =len(SCREAMING_SNAKE_CASE__ ) > max_length
if needs_to_be_truncated:
A : Union[str, Any] =processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
A : Dict =processed_features['attention_mask'][:max_length]
return processed_features
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any=False , SCREAMING_SNAKE_CASE__ : Dict=None ) -> Union[str, Any]:
# Get padding strategy
if padding is not False:
if padding is True:
A : List[Any] =PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
A : Tuple =PaddingStrategy(SCREAMING_SNAKE_CASE__ )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
A : Optional[int] =padding
else:
A : List[str] =PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f'When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined' )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
'Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'
' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.' )
return padding_strategy
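# Hedged usage sketch of the padding path above (`pad` is the assumed public
# name of the main entry point; feature values are made up):
# feats = BatchFeature({'input_values': [[0.1, 0.2, 0.3], [0.4]]})
# padded = extractor.pad(feats, padding='longest', return_tensors='np')
# padded['input_values'].shape -> (2, 3), with attention_mask marking real steps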
| 661 | 0 |
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class SCREAMING_SNAKE_CASE_ ( __lowercase ):
'''simple docstring'''
lowercase : Optional[Any] = ComputeEnvironment.AMAZON_SAGEMAKER
lowercase : str = True
lowercase : List[Any] = "ml.p3.2xlarge"
lowercase : Optional[Any] = "accelerate_sagemaker_execution_role"
lowercase : Union[str, Any] = "hf-sm"
lowercase : Any = "us-east-1"
lowercase : Optional[Any] = 1
lowercase : Dict = "accelerate-sagemaker-1"
lowercase : str = "1.6"
lowercase : List[str] = "4.4"
lowercase : str = "train.py"
lowercase : Optional[Any] = [
"--model_name_or_path",
"bert",
"--do_train",
"False",
"--epochs",
"3",
"--learning_rate",
"5e-5",
"--max_steps",
"50.5",
]
lowercase : str = [
"--model_name_or_path",
"bert",
"--do_train",
"--do_test",
"False",
"--do_predict",
"--epochs",
"3",
"--learning_rate",
"5e-5",
"--max_steps",
"50.5",
]
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Optional[Any]:
        # `_convert_nargs_to_dict` should parse the nargs list into correctly-typed values.
A : str =_convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
assert isinstance(converted_args['model_name_or_path'] , _A )
assert isinstance(converted_args['do_train'] , _A )
assert isinstance(converted_args['epochs'] , _A )
assert isinstance(converted_args['learning_rate'] , _A )
assert isinstance(converted_args['max_steps'] , _A )
with pytest.raises(_A ):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
| 705 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
_lowercase : Optional[int] =logging.get_logger(__name__)
_lowercase : List[str] ={
'''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'''
),
'''microsoft/deberta-v2-xxlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
lowercase : int = "deberta-v2"
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : str=12_81_00 , SCREAMING_SNAKE_CASE__ : List[Any]=15_36 , SCREAMING_SNAKE_CASE__ : Dict=24 , SCREAMING_SNAKE_CASE__ : List[str]=24 , SCREAMING_SNAKE_CASE__ : List[str]=61_44 , SCREAMING_SNAKE_CASE__ : List[Any]="gelu" , SCREAMING_SNAKE_CASE__ : int=0.1 , SCREAMING_SNAKE_CASE__ : Any=0.1 , SCREAMING_SNAKE_CASE__ : List[str]=5_12 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0 , SCREAMING_SNAKE_CASE__ : Tuple=0.0_2 , SCREAMING_SNAKE_CASE__ : List[Any]=1e-7 , SCREAMING_SNAKE_CASE__ : Optional[int]=False , SCREAMING_SNAKE_CASE__ : Tuple=-1 , SCREAMING_SNAKE_CASE__ : List[Any]=0 , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : List[str]=0 , SCREAMING_SNAKE_CASE__ : List[str]="gelu" , **SCREAMING_SNAKE_CASE__ : Dict , ) -> Dict:
super().__init__(**SCREAMING_SNAKE_CASE__ )
A : Dict =hidden_size
A : Optional[Any] =num_hidden_layers
A : Optional[int] =num_attention_heads
A : Optional[int] =intermediate_size
A : Any =hidden_act
A : Any =hidden_dropout_prob
A : Union[str, Any] =attention_probs_dropout_prob
A : Optional[Any] =max_position_embeddings
A : Tuple =type_vocab_size
A : Tuple =initializer_range
A : int =relative_attention
A : int =max_relative_positions
A : Optional[Any] =pad_token_id
A : Union[str, Any] =position_biased_input
# Backwards compatibility
if type(SCREAMING_SNAKE_CASE__ ) == str:
A : Any =[x.strip() for x in pos_att_type.lower().split('|' )]
A : Any =pos_att_type
A : Tuple =vocab_size
A : Any =layer_norm_eps
A : Optional[Any] =kwargs.get('pooler_hidden_size' , SCREAMING_SNAKE_CASE__ )
A : str =pooler_dropout
A : Any =pooler_hidden_act
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
@property
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
A : List[Any] ={0: 'batch', 1: 'choice', 2: 'sequence'}
else:
A : int ={0: 'batch', 1: 'sequence'}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)] )
else:
return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)] )
@property
def SCREAMING_SNAKE_CASE_ ( self : int ) -> int:
return 12
def SCREAMING_SNAKE_CASE_ ( self : str , SCREAMING_SNAKE_CASE__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : Optional["TensorType"] = None , SCREAMING_SNAKE_CASE__ : int = 3 , SCREAMING_SNAKE_CASE__ : int = 40 , SCREAMING_SNAKE_CASE__ : int = 40 , SCREAMING_SNAKE_CASE__ : "PreTrainedTokenizerBase" = None , ) -> Mapping[str, Any]:
A : str =super().generate_dummy_inputs(preprocessor=SCREAMING_SNAKE_CASE__ , framework=SCREAMING_SNAKE_CASE__ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
| 661 | 0 |
def A__ ( lowercase: Optional[int] ) -> Union[str, Any]:
A : Union[str, Any] =0
for ch in input_str:
A : Tuple =ord(lowerCAmelCase__ )
A : Optional[int] =pow(2, lowerCAmelCase__ )
# If we already turned on bit for current character's unicode
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 706 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
lowercase : Tuple = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]
@register_to_config
def __init__( self : Any , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : int = 5_02_57 , SCREAMING_SNAKE_CASE__ : int = 10_24 , SCREAMING_SNAKE_CASE__ : int = 7_68 , SCREAMING_SNAKE_CASE__ : int = 12 , SCREAMING_SNAKE_CASE__ : int = 12 , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : str = "gelu_new" , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 1e-5 , SCREAMING_SNAKE_CASE__ : float = 0.0_2 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , ) -> List[str]:
super().__init__()
A : str =prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f'`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and'
f' `n_embd`: {n_embd} are not equal.' )
A : List[Any] =prefix_inner_dim
A : Dict =prefix_hidden_dim
A : List[str] =(
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
A : Optional[int] =(
nn.Linear(self.prefix_hidden_dim , SCREAMING_SNAKE_CASE__ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
A : Dict =GPTaConfig(
vocab_size=SCREAMING_SNAKE_CASE__ , n_positions=SCREAMING_SNAKE_CASE__ , n_embd=SCREAMING_SNAKE_CASE__ , n_layer=SCREAMING_SNAKE_CASE__ , n_head=SCREAMING_SNAKE_CASE__ , n_inner=SCREAMING_SNAKE_CASE__ , activation_function=SCREAMING_SNAKE_CASE__ , resid_pdrop=SCREAMING_SNAKE_CASE__ , embd_pdrop=SCREAMING_SNAKE_CASE__ , attn_pdrop=SCREAMING_SNAKE_CASE__ , layer_norm_epsilon=SCREAMING_SNAKE_CASE__ , initializer_range=SCREAMING_SNAKE_CASE__ , scale_attn_weights=SCREAMING_SNAKE_CASE__ , use_cache=SCREAMING_SNAKE_CASE__ , scale_attn_by_inverse_layer_idx=SCREAMING_SNAKE_CASE__ , reorder_and_upcast_attn=SCREAMING_SNAKE_CASE__ , )
A : Dict =GPTaLMHeadModel(SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE__ : torch.Tensor , SCREAMING_SNAKE_CASE__ : torch.Tensor , SCREAMING_SNAKE_CASE__ : Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE__ : Optional[torch.Tensor] = None , ) -> Optional[Any]:
A : str =self.transformer.transformer.wte(SCREAMING_SNAKE_CASE__ )
A : Any =self.encode_prefix(SCREAMING_SNAKE_CASE__ )
A : Optional[Any] =self.decode_prefix(SCREAMING_SNAKE_CASE__ )
A : Optional[int] =torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
A : int =self.get_dummy_token(input_ids.shape[0] , input_ids.device )
A : Optional[int] =torch.cat((dummy_token, input_ids) , dim=1 )
A : Dict =self.transformer(inputs_embeds=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def SCREAMING_SNAKE_CASE_ ( self : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : torch.device ) -> torch.Tensor:
return torch.zeros(SCREAMING_SNAKE_CASE__ , self.prefix_length , dtype=torch.intaa , device=SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[str]:
return self.encode_prefix(SCREAMING_SNAKE_CASE__ )
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any ) -> Dict:
A : Dict =torch.split(SCREAMING_SNAKE_CASE__ , 1 , dim=0 )
A : int =[]
A : Optional[int] =[]
for feature in features:
A : int =self.decode_prefix(feature.to(SCREAMING_SNAKE_CASE__ ) ) # back to the clip feature
# Only support beam search for now
A , A : Dict =self.generate_beam(
input_embeds=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
A : str =torch.stack(SCREAMING_SNAKE_CASE__ )
A : int =torch.stack(SCREAMING_SNAKE_CASE__ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : int = 5 , SCREAMING_SNAKE_CASE__ : int = 67 , SCREAMING_SNAKE_CASE__ : float = 1.0 , SCREAMING_SNAKE_CASE__ : Optional[int] = None , ) -> Dict:
A : Dict =eos_token_id
A : str =None
A : List[Any] =None
A : List[Any] =torch.ones(SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ , dtype=torch.int )
A : str =torch.zeros(SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ , dtype=torch.bool )
if input_embeds is not None:
A : Any =input_embeds
else:
A : List[Any] =self.transformer.transformer.wte(SCREAMING_SNAKE_CASE__ )
for i in range(SCREAMING_SNAKE_CASE__ ):
A : Any =self.transformer(inputs_embeds=SCREAMING_SNAKE_CASE__ )
A : str =outputs.logits
A : Union[str, Any] =logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
A : List[str] =logits.softmax(-1 ).log()
if scores is None:
A , A : Any =logits.topk(SCREAMING_SNAKE_CASE__ , -1 )
A : Any =generated.expand(SCREAMING_SNAKE_CASE__ , *generated.shape[1:] )
A , A : Tuple =next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
A : Union[str, Any] =next_tokens
else:
A : str =tokens.expand(SCREAMING_SNAKE_CASE__ , *tokens.shape[1:] )
A : Optional[int] =torch.cat((tokens, next_tokens) , dim=1 )
else:
A : Optional[Any] =-float(np.inf )
A : Tuple =0
A : Optional[Any] =scores[:, None] + logits
seq_lengths[~is_stopped] += 1
A : int =scores_sum / seq_lengths[:, None]
A , A : Optional[int] =scores_sum_average.view(-1 ).topk(SCREAMING_SNAKE_CASE__ , -1 )
A : Dict =next_tokens // scores_sum.shape[1]
A : Optional[Any] =seq_lengths[next_tokens_source]
A : Tuple =next_tokens % scores_sum.shape[1]
A : Optional[Any] =next_tokens.unsqueeze(1 )
A : Optional[Any] =tokens[next_tokens_source]
A : Any =torch.cat((tokens, next_tokens) , dim=1 )
A : List[str] =generated[next_tokens_source]
A : List[Any] =scores_sum_average * seq_lengths
A : Optional[Any] =is_stopped[next_tokens_source]
A : Optional[int] =self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
A : Any =torch.cat((generated, next_token_embed) , dim=1 )
A : Optional[int] =is_stopped + next_tokens.eq(SCREAMING_SNAKE_CASE__ ).squeeze()
if is_stopped.all():
break
A : Optional[Any] =scores / seq_lengths
A : str =scores.argsort(descending=SCREAMING_SNAKE_CASE__ )
# tokens tensors are already padded to max_seq_length
A : Optional[Any] =[tokens[i] for i in order]
A : Any =torch.stack(SCREAMING_SNAKE_CASE__ , dim=0 )
A : str =torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
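# Hedged shape sketch for the beam search above (beam_size defaults to 5):
# a single prefix embedding (1, prefix_length, n_embd) is expanded to beam_size
# candidates; output_texts stacks the beam token ids, sorted by average score.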
| 661 | 0 |
def A__ ( lowercase: List[Any], lowercase: Tuple ) -> List[str]:
A : int =len(lowerCAmelCase_ )
A : List[str] =[]
for i in range(len(lowerCAmelCase_ ) - pat_len + 1 ):
A : Optional[Any] =True
for j in range(lowerCAmelCase_ ):
if s[i + j] != pattern[j]:
A : Optional[Any] =False
break
if match_found:
position.append(lowerCAmelCase_ )
return position
if __name__ == "__main__":
assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3]
print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
| 707 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowercase : Optional[int] =get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
lowercase : List[str] = XLMRobertaTokenizer
lowercase : Dict = XLMRobertaTokenizerFast
lowercase : str = True
lowercase : Tuple = True
def SCREAMING_SNAKE_CASE_ ( self : int ) -> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
A : List[str] =XLMRobertaTokenizer(SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : int ) -> List[Any]:
A : List[str] ='<pad>'
A : int =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Any:
A : List[str] =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , 10_02 )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Dict:
self.assertEqual(self.get_tokenizer().vocab_size , 10_02 )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> str:
A : Union[str, Any] =XLMRobertaTokenizer(SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ )
A : str =tokenizer.tokenize('This is a test' )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
A : Any =tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
A : Tuple =tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
A : Union[str, Any] =tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Optional[int]:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
A : Any =(self.rust_tokenizer_class, 'hf-internal-testing/tiny-xlm-roberta', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
A : List[Any] =self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
A : Dict =self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
A : str =tempfile.mkdtemp()
A : Optional[int] =tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE__ )
A : Optional[Any] =tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
A : List[str] =tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Checks everything loads correctly in the same way
A : Tuple =tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : Dict =tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(SCREAMING_SNAKE_CASE__ )
# Save tokenizer rust, legacy_format=True
A : Optional[int] =tempfile.mkdtemp()
A : Optional[int] =tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE__ , legacy_format=SCREAMING_SNAKE_CASE__ )
A : List[Any] =tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE__ )
# Checks it save with the same files
self.assertSequenceEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Checks everything loads correctly in the same way
A : Tuple =tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : Tuple =tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
shutil.rmtree(SCREAMING_SNAKE_CASE__ )
# Save tokenizer rust, legacy_format=False
A : List[Any] =tempfile.mkdtemp()
A : Optional[int] =tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE__ , legacy_format=SCREAMING_SNAKE_CASE__ )
A : str =tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
A : List[Any] =tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : List[Any] =tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
shutil.rmtree(SCREAMING_SNAKE_CASE__ )
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Optional[int]:
return XLMRobertaTokenizer.from_pretrained('xlm-roberta-base' )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Any:
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(SCREAMING_SNAKE_CASE__ , f.name )
A : Optional[Any] =XLMRobertaTokenizer(f.name , keep_accents=SCREAMING_SNAKE_CASE__ )
A : int =pickle.dumps(SCREAMING_SNAKE_CASE__ )
pickle.loads(SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Union[str, Any]:
if not self.test_rust_tokenizer:
return
A : Union[str, Any] =self.get_tokenizer()
A : int =self.get_rust_tokenizer()
A : List[str] ='I was born in 92000, and this is falsé.'
A : Union[str, Any] =tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
A : Optional[int] =rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A : Any =tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
A : Tuple =rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A : Optional[Any] =self.get_rust_tokenizer()
A : int =tokenizer.encode(SCREAMING_SNAKE_CASE__ )
A : Dict =rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> List[str]:
A : Any ='Hello World!'
A : Optional[Any] =[0, 3_53_78, 66_61, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(SCREAMING_SNAKE_CASE__ , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE__ ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> str:
A : Any =(
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
A : int =[
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(SCREAMING_SNAKE_CASE__ , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE__ ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Any:
# fmt: off
A : List[Any] ={'input_ids': [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE__ , model_name='xlm-roberta-base' , revision='d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3' , )
| 661 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_lowercase : Optional[Any] =logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
_lowercase : Optional[int] =list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
_lowercase : Dict =tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
lowercase : Tuple = field(
default="cifar10" , metadata={"help": "Name of a dataset from the datasets package"} )
lowercase : int = field(
default=lowerCAmelCase_ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
lowercase : Any = field(
default=lowerCAmelCase_ , metadata={"help": "The column name of the images in the files. If not set, will try to use \'image\' or \'img\'."} , )
lowercase : Tuple = field(default=lowerCAmelCase_ , metadata={"help": "A folder containing the training data."} )
lowercase : Tuple = field(default=lowerCAmelCase_ , metadata={"help": "A folder containing the validation data."} )
lowercase : Any = field(
default=0.1_5 , metadata={"help": "Percent to split off of train for validation."} )
lowercase : Optional[Any] = field(default=32 , metadata={"help": "The size of the square patches to use for masking."} )
lowercase : Optional[int] = field(
default=0.6 , metadata={"help": "Percentage of patches to mask."} , )
lowercase : Optional[Any] = field(
default=lowerCAmelCase_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
lowercase : Dict = field(
default=lowerCAmelCase_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Optional[int]:
A : List[Any] ={}
if self.train_dir is not None:
A : Optional[int] =self.train_dir
if self.validation_dir is not None:
A : str =self.validation_dir
A : Union[str, Any] =data_files if data_files else None
@dataclass
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
lowercase : Optional[int] = field(
default=lowerCAmelCase_ , metadata={
"help": (
"The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
"checkpoint identifier on the hub. "
"Don\'t set if you want to train a model from scratch."
)
} , )
lowercase : Union[str, Any] = field(
default=lowerCAmelCase_ , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(lowerCAmelCase_ )} , )
lowercase : Optional[int] = field(
default=lowerCAmelCase_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowercase : Dict = field(
default=lowerCAmelCase_ , metadata={
"help": (
"Override some existing default config settings when a model is trained from scratch. Example: "
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
)
} , )
lowercase : Optional[Any] = field(
default=lowerCAmelCase_ , metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"} , )
lowercase : Optional[Any] = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
lowercase : Tuple = field(default=lowerCAmelCase_ , metadata={"help": "Name or path of preprocessor config."} )
lowercase : Optional[Any] = field(
default=lowerCAmelCase_ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
lowercase : List[str] = field(
default=lowerCAmelCase_ , metadata={
"help": (
"The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
)
} , )
lowercase : str = field(
default=lowerCAmelCase_ , metadata={
"help": (
"The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
)
} , )
lowercase : Union[str, Any] = field(
default=lowerCAmelCase_ , metadata={"help": "Stride to use for the encoder."} , )
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Tuple=1_92 , SCREAMING_SNAKE_CASE__ : Dict=32 , SCREAMING_SNAKE_CASE__ : Dict=4 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.6 ) -> Tuple:
A : Dict =input_size
A : int =mask_patch_size
A : List[str] =model_patch_size
A : str =mask_ratio
if self.input_size % self.mask_patch_size != 0:
raise ValueError('Input size must be divisible by mask patch size' )
if self.mask_patch_size % self.model_patch_size != 0:
raise ValueError('Mask patch size must be divisible by model patch size' )
A : List[str] =self.input_size // self.mask_patch_size
A : Any =self.mask_patch_size // self.model_patch_size
A : Optional[int] =self.rand_size**2
A : Union[str, Any] =int(np.ceil(self.token_count * self.mask_ratio ) )
def __call__( self : int ) -> Union[str, Any]:
A : Optional[int] =np.random.permutation(self.token_count )[: self.mask_count]
A : List[str] =np.zeros(self.token_count , dtype=SCREAMING_SNAKE_CASE__ )
A : Optional[Any] =1
A : int =mask.reshape((self.rand_size, self.rand_size) )
A : str =mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 )
return torch.tensor(mask.flatten() )
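# Illustrative numbers (derived from the defaults above, not part of the original
# script): with input_size=192, mask_patch_size=32, model_patch_size=4 and
# mask_ratio=0.6 we get rand_size = 192 // 32 = 6, scale = 32 // 4 = 8,
# token_count = 6**2 = 36 and mask_count = ceil(36 * 0.6) = 22. The returned flat
# mask therefore has (6 * 8)**2 = 2304 entries, one per 4x4 model patch, of which
# 22 * 8 * 8 = 1408 are set to 1.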
def A__ ( lowercase: Tuple ) -> Dict:
A : int =torch.stack([example['pixel_values'] for example in examples] )
A : Optional[Any] =torch.stack([example['mask'] for example in examples] )
return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def A__ ( ) -> Tuple:
A : Tuple =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A , A , A : Union[str, Any] =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A , A , A : Union[str, Any] =parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_mim', _snake_case, _snake_case )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout )], )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
A : int =training_args.get_process_log_level()
logger.setLevel(_snake_case )
transformers.utils.logging.set_verbosity(_snake_case )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
A : Optional[int] =None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
A : Dict =get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset.
A : str =load_dataset(
data_args.dataset_name, data_args.dataset_config_name, data_files=data_args.data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
# If we don't have a validation split, split off a percentage of train as validation.
A : Tuple =None if 'validation' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split, _snake_case ) and data_args.train_val_split > 0.0:
A : str =ds['train'].train_test_split(data_args.train_val_split )
A : Tuple =split['train']
A : Union[str, Any] =split['test']
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A : Union[str, Any] ={
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name_or_path:
A : Optional[int] =AutoConfig.from_pretrained(model_args.config_name_or_path, **_snake_case )
elif model_args.model_name_or_path:
A : int =AutoConfig.from_pretrained(model_args.model_name_or_path, **_snake_case )
else:
A : List[Any] =CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F'Overriding config: {model_args.config_overrides}' )
config.update_from_string(model_args.config_overrides )
logger.info(F'New config: {config}' )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
if hasattr(_snake_case, 'decoder_type' ):
A : Dict ='simmim'
# adapt config
A : str =model_args.image_size if model_args.image_size is not None else config.image_size
A : Any =model_args.patch_size if model_args.patch_size is not None else config.patch_size
A : Union[str, Any] =(
model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
)
config.update(
{
'image_size': model_args.image_size,
'patch_size': model_args.patch_size,
'encoder_stride': model_args.encoder_stride,
} )
# create image processor
if model_args.image_processor_name:
A : Any =AutoImageProcessor.from_pretrained(model_args.image_processor_name, **_snake_case )
elif model_args.model_name_or_path:
A : Any =AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **_snake_case )
else:
A : int ={
conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
}
A : List[str] =IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
if model_args.model_name_or_path:
A : Optional[int] =AutoModelForMaskedImageModeling.from_pretrained(
model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path ), config=_snake_case, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
else:
logger.info('Training new model from scratch' )
A : Dict =AutoModelForMaskedImageModeling.from_config(_snake_case )
if training_args.do_train:
A : Tuple =ds['train'].column_names
else:
A : Dict =ds['validation'].column_names
if data_args.image_column_name is not None:
A : int =data_args.image_column_name
elif "image" in column_names:
A : int ='image'
elif "img" in column_names:
A : Tuple ='img'
else:
A : Dict =column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
A : Tuple =Compose(
[
Lambda(lambda lowercase : img.convert('RGB' ) if img.mode != "RGB" else img ),
RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0) ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean, std=image_processor.image_std ),
] )
# create mask generator
A : Dict =MaskGenerator(
input_size=model_args.image_size, mask_patch_size=data_args.mask_patch_size, model_patch_size=model_args.patch_size, mask_ratio=data_args.mask_ratio, )
def preprocess_images(lowercase: Any ):
A : Optional[int] =[transforms(_snake_case ) for image in examples[image_column_name]]
A : Optional[Any] =[mask_generator() for i in range(len(examples[image_column_name] ) )]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
A : str =ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(_snake_case )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
A : Any =(
ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(_snake_case )
# Initialize our trainer
A : Optional[Any] =Trainer(
model=_snake_case, args=_snake_case, train_dataset=ds['train'] if training_args.do_train else None, eval_dataset=ds['validation'] if training_args.do_eval else None, tokenizer=_snake_case, data_collator=_snake_case, )
# Training
if training_args.do_train:
A : Union[str, Any] =None
if training_args.resume_from_checkpoint is not None:
A : str =training_args.resume_from_checkpoint
elif last_checkpoint is not None:
A : str =last_checkpoint
A : Dict =trainer.train(resume_from_checkpoint=_snake_case )
trainer.save_model()
trainer.log_metrics('train', train_result.metrics )
trainer.save_metrics('train', train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
A : Union[str, Any] =trainer.evaluate()
trainer.log_metrics('eval', _snake_case )
trainer.save_metrics('eval', _snake_case )
# Write model card and (optionally) push to hub
A : Union[str, Any] ={
'finetuned_from': model_args.model_name_or_path,
'tasks': 'masked-image-modeling',
'dataset': data_args.dataset_name,
'tags': ['masked-image-modeling'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_snake_case )
else:
trainer.create_model_card(**_snake_case )
if __name__ == "__main__":
main()
| 708 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : int =logging.get_logger(__name__)
_lowercase : Dict ={
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/config.json''',
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
lowercase : Optional[int] = "xglm"
lowercase : Any = ["past_key_values"]
lowercase : Dict = {
"num_attention_heads": "attention_heads",
"hidden_size": "d_model",
"num_hidden_layers": "num_layers",
}
def __init__( self : int , SCREAMING_SNAKE_CASE__ : List[Any]=25_60_08 , SCREAMING_SNAKE_CASE__ : Dict=20_48 , SCREAMING_SNAKE_CASE__ : List[Any]=10_24 , SCREAMING_SNAKE_CASE__ : str=40_96 , SCREAMING_SNAKE_CASE__ : Optional[int]=24 , SCREAMING_SNAKE_CASE__ : Optional[Any]=16 , SCREAMING_SNAKE_CASE__ : Any="gelu" , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : List[Any]=0.0 , SCREAMING_SNAKE_CASE__ : Tuple=0.0 , SCREAMING_SNAKE_CASE__ : List[Any]=0.0_2 , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Any=2 , SCREAMING_SNAKE_CASE__ : List[Any]=1 , SCREAMING_SNAKE_CASE__ : str=0 , SCREAMING_SNAKE_CASE__ : List[str]=2 , **SCREAMING_SNAKE_CASE__ : Dict , ) -> int:
A : str =vocab_size
A : Union[str, Any] =max_position_embeddings
A : Optional[Any] =d_model
A : Optional[int] =ffn_dim
A : int =num_layers
A : Any =attention_heads
A : Dict =activation_function
A : List[Any] =dropout
A : str =attention_dropout
A : List[Any] =activation_dropout
A : List[Any] =layerdrop
A : List[Any] =init_std
A : Union[str, Any] =scale_embedding # scale factor will be sqrt(d_model) if True
A : List[str] =use_cache
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , decoder_start_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
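# Illustrative usage (assuming this class keeps the upstream name XGLMConfig,
# which the rendering above obscures): XGLMConfig(num_layers=2, attention_heads=4)
# exposes config.num_hidden_layers == 2 and config.num_attention_heads == 4
# through the attribute_map declared on the class.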
| 661 | 0 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase : Union[str, Any] = "char"
lowercase : Union[str, Any] = "bpe"
lowercase : List[Any] = "wp"
_lowercase : Optional[Any] =(DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase : Optional[Any] = ["image_processor", "char_tokenizer"]
lowercase : str = "ViTImageProcessor"
lowercase : List[Any] = "MgpstrTokenizer"
def __init__( self : str , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Dict:
A : Tuple =None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _SCREAMING_SNAKE_CASE , )
A : List[Any] =kwargs.pop('feature_extractor' )
A : int =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
A : Tuple =tokenizer
A : List[Any] =AutoTokenizer.from_pretrained('gpt2' )
A : Any =AutoTokenizer.from_pretrained('bert-base-uncased' )
super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __call__( self : List[str] , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : int=None , **SCREAMING_SNAKE_CASE__ : Any ) -> Optional[int]:
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
A : Any =self.image_processor(_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if text is not None:
A : Union[str, Any] =self.char_tokenizer(_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if text is None:
return inputs
elif images is None:
return encodings
else:
A : List[Any] =encodings['input_ids']
return inputs
def SCREAMING_SNAKE_CASE_ ( self : Tuple , SCREAMING_SNAKE_CASE__ : Any ) -> Optional[Any]:
A , A , A : Optional[int] =sequences
A : Optional[int] =char_preds.size(0 )
A , A : Any =self._decode_helper(_SCREAMING_SNAKE_CASE , 'char' )
A , A : Any =self._decode_helper(_SCREAMING_SNAKE_CASE , 'bpe' )
A , A : Any =self._decode_helper(_SCREAMING_SNAKE_CASE , 'wp' )
A : Dict =[]
A : str =[]
for i in range(_SCREAMING_SNAKE_CASE ):
A : Optional[Any] =[char_scores[i], bpe_scores[i], wp_scores[i]]
A : Tuple =[char_strs[i], bpe_strs[i], wp_strs[i]]
A : Any =scores.index(max(_SCREAMING_SNAKE_CASE ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
A : Dict ={}
A : List[str] =final_strs
A : str =final_scores
A : int =char_strs
A : Tuple =bpe_strs
A : str =wp_strs
return out
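# In the upstream MgpstrProcessor these five assignments populate the keys
# out['generated_text'], out['scores'], out['char_preds'], out['bpe_preds'] and
# out['wp_preds'] (an assumption based on the documented API; the literal key
# names were lost in this rendering).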
def SCREAMING_SNAKE_CASE_ ( self : str , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> List[Any]:
if format == DecodeType.CHARACTER:
A : int =self.char_decode
A : int =1
A : Union[str, Any] ='[s]'
elif format == DecodeType.BPE:
A : Optional[int] =self.bpe_decode
A : List[str] =2
A : Any ='#'
elif format == DecodeType.WORDPIECE:
A : List[Any] =self.wp_decode
A : Optional[int] =1_02
A : Optional[int] ='[SEP]'
else:
raise ValueError(f'Format {format} is not supported.' )
A , A : int =[], []
A : Tuple =pred_logits.size(0 )
A : List[str] =pred_logits.size(1 )
A , A : Dict =pred_logits.topk(1 , dim=-1 , largest=_SCREAMING_SNAKE_CASE , sorted=_SCREAMING_SNAKE_CASE )
A : str =preds_index.view(-1 , _SCREAMING_SNAKE_CASE )[:, 1:]
A : List[str] =decoder(_SCREAMING_SNAKE_CASE )
A , A : Optional[Any] =torch.nn.functional.softmax(_SCREAMING_SNAKE_CASE , dim=2 ).max(dim=2 )
A : int =preds_max_prob[:, 1:]
for index in range(_SCREAMING_SNAKE_CASE ):
A : Dict =preds_str[index].find(_SCREAMING_SNAKE_CASE )
A : Union[str, Any] =preds_str[index][:pred_eos]
A : List[Any] =preds_index[index].cpu().tolist()
A : Dict =pred_index.index(_SCREAMING_SNAKE_CASE ) if eos_token in pred_index else -1
A : Dict =preds_max_prob[index][: pred_eos_index + 1]
A : Tuple =pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(_SCREAMING_SNAKE_CASE )
conf_scores.append(_SCREAMING_SNAKE_CASE )
return dec_strs, conf_scores
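# Descriptive note (not in the original): the per-sequence confidence score is the
# product (cumprod) of the per-step max softmax probabilities up to and including
# the EOS position, or 0.0 when the truncated sequence is empty.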
def SCREAMING_SNAKE_CASE_ ( self : int , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Optional[int]:
A : int =[seq.replace(' ' , '' ) for seq in self.char_tokenizer.batch_decode(_SCREAMING_SNAKE_CASE )]
return decode_strs
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : List[str] ) -> Any:
return self.bpe_tokenizer.batch_decode(_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , SCREAMING_SNAKE_CASE__ : Any ) -> Optional[int]:
A : List[str] =[seq.replace(' ' , '' ) for seq in self.wp_tokenizer.batch_decode(_SCREAMING_SNAKE_CASE )]
return decode_strs
| 709 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
_lowercase : List[str] ='''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine'''
def A__ ( ) -> List[Any]:
A : Any =_ask_options(
'In which compute environment are you running?', ['This machine', 'AWS (Amazon SageMaker)'], _convert_compute_environment, )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
A : Tuple =get_sagemaker_input()
else:
A : str =get_cluster_input()
return config
def A__ ( lowercase: int=None ) -> str:
if subparsers is not None:
A : List[str] =subparsers.add_parser('config', description=lowercase )
else:
A : Union[str, Any] =argparse.ArgumentParser('Accelerate config command', description=lowercase )
parser.add_argument(
'--config_file', default=lowercase, help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
), )
if subparsers is not None:
parser.set_defaults(func=lowercase )
return parser
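# Descriptive note (not in the original): when a subparsers object is passed (as
# the top-level accelerate CLI does), this registers 'config' as a subcommand and
# wires the config command as its default func; standalone it builds a full
# ArgumentParser instead.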
def A__ ( lowercase: Tuple ) -> List[Any]:
A : Union[str, Any] =get_user_input()
if args.config_file is not None:
A : Optional[Any] =args.config_file
else:
if not os.path.isdir(lowercase ):
os.makedirs(lowercase )
A : Union[str, Any] =default_yaml_config_file
if config_file.endswith('.json' ):
config.to_json_file(lowercase )
else:
config.to_yaml_file(lowercase )
print(F'accelerate configuration saved at {config_file}' )
def A__ ( ) -> Optional[int]:
A : Any =config_command_parser()
A : int =parser.parse_args()
config_command(lowercase )
if __name__ == "__main__":
main()
| 661 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class SCREAMING_SNAKE_CASE_ ( _A ):
'''simple docstring'''
lowercase : Tuple = None
lowercase : int = None
lowercase : Optional[int] = None
lowercase : int = None
class SCREAMING_SNAKE_CASE_ ( _A ):
'''simple docstring'''
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str]=1 , SCREAMING_SNAKE_CASE__ : List[str]=0 , SCREAMING_SNAKE_CASE__ : Any=2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=5_12 , SCREAMING_SNAKE_CASE__ : Dict="cls" , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : Optional[int]=True , **SCREAMING_SNAKE_CASE__ : int , ) -> Optional[Any]:
super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
A : List[Any] =project_dim
A : Optional[int] =pooler_fn
A : Dict =learn_encoder
A : int =use_attention_mask
class SCREAMING_SNAKE_CASE_ ( _A ):
'''simple docstring'''
lowercase : Dict = [r"pooler", r"logit_scale"]
lowercase : List[Any] = [r"position_ids", r"predictions.decoder.bias"]
lowercase : List[str] = "roberta"
lowercase : List[str] = RobertaSeriesConfig
def __init__( self : str , SCREAMING_SNAKE_CASE__ : List[str] ) -> List[str]:
super().__init__(UpperCamelCase__ )
A : int =XLMRobertaModel(UpperCamelCase__ )
A : Tuple =nn.Linear(config.hidden_size , config.project_dim )
A : Tuple =getattr(UpperCamelCase__ , 'has_pre_transformation' , UpperCamelCase__ )
if self.has_pre_transformation:
A : Optional[Any] =nn.Linear(config.hidden_size , config.project_dim )
A : str =nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
self.post_init()
def SCREAMING_SNAKE_CASE_ ( self : str , SCREAMING_SNAKE_CASE__ : Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE__ : Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE__ : Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE__ : Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE__ : Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE__ : Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE__ : Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE__ : Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , ) -> str:
A : List[Any] =return_dict if return_dict is not None else self.config.use_return_dict
A : Tuple =self.base_model(
input_ids=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , position_ids=UpperCamelCase__ , head_mask=UpperCamelCase__ , inputs_embeds=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , output_attentions=UpperCamelCase__ , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=UpperCamelCase__ , )
if self.has_pre_transformation:
A : Optional[Any] =outputs['hidden_states'][-2]
A : Dict =self.pre_LN(UpperCamelCase__ )
A : Any =self.transformation_pre(UpperCamelCase__ )
return TransformationModelOutput(
projection_state=UpperCamelCase__ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
else:
A : Tuple =self.transformation(outputs.last_hidden_state )
return TransformationModelOutput(
projection_state=UpperCamelCase__ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 710 |
import collections
import importlib.util
import os
import re
from pathlib import Path
_lowercase : List[str] ='''src/transformers'''
# Matches is_xxx_available()
_lowercase : Dict =re.compile(R'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
_lowercase : List[Any] =re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_lowercase : Tuple =re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
_lowercase : Dict =re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
_lowercase : List[Any] =re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_lowercase : str =re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
_lowercase : Optional[int] =re.compile(R'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
_lowercase : Any =re.compile(R'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
_lowercase : List[Any] =re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
_lowercase : Optional[Any] =re.compile(R'''^\s*try:''')
# Catches a line with else:
_lowercase : List[Any] =re.compile(R'''^\s*else:''')
def A__ ( lowercase: Dict ) -> int:
if _re_test_backend.search(lowercase ) is None:
return None
A : Any =[b[0] for b in _re_backend.findall(lowercase )]
backends.sort()
return "_and_".join(lowercase )
def A__ ( lowercase: Any ) -> List[Any]:
with open(lowercase, 'r', encoding='utf-8', newline='\n' ) as f:
A : Optional[Any] =f.readlines()
A : Dict =0
while line_index < len(lowercase ) and not lines[line_index].startswith('_import_structure = {' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(lowercase ):
return None
# First grab the objects without a specific backend in _import_structure
A : Optional[int] =[]
while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None:
A : int =lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(lowercase ):
A : int =_re_one_line_import_struct.search(lowercase ).groups()[0]
A : int =re.findall(r'\[([^\]]+)\]', lowercase )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(', ' )] )
line_index += 1
continue
A : Optional[int] =_re_import_struct_key_value.search(lowercase )
if single_line_import_search is not None:
A : Dict =[obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(lowercase ) > 0]
objects.extend(lowercase )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
line_index += 1
A : str ={'none': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('if TYPE_CHECKING' ):
# If the line is an if not is_backend_available, we grab all objects associated.
A : Optional[int] =find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
A : str =None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
A : List[str] =[]
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ):
A : Optional[Any] =lines[line_index]
if _re_import_struct_add_one.search(lowercase ) is not None:
objects.append(_re_import_struct_add_one.search(lowercase ).groups()[0] )
elif _re_import_struct_add_many.search(lowercase ) is not None:
A : Optional[Any] =_re_import_struct_add_many.search(lowercase ).groups()[0].split(', ' )
A : int =[obj[1:-1] for obj in imports if len(lowercase ) > 0]
objects.extend(lowercase )
elif _re_between_brackets.search(lowercase ) is not None:
A : Optional[int] =_re_between_brackets.search(lowercase ).groups()[0].split(', ' )
A : Optional[int] =[obj[1:-1] for obj in imports if len(lowercase ) > 0]
objects.extend(lowercase )
elif _re_quote_object.search(lowercase ) is not None:
objects.append(_re_quote_object.search(lowercase ).groups()[0] )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
elif line.startswith(' ' * 12 + '"' ):
objects.append(line[13:-3] )
line_index += 1
A : Optional[Any] =objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
A : Optional[Any] =[]
while (
line_index < len(lowercase )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('else' )
):
A : Any =lines[line_index]
A : Optional[int] =_re_import.search(lowercase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
A : Optional[Any] ={'none': objects}
# Let's continue with backend-specific objects
while line_index < len(lowercase ):
# If the line is an if is_backend_available, we grab all objects associated.
A : str =find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
A : Optional[Any] =None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
A : List[str] =[]
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ):
A : Any =lines[line_index]
A : Any =_re_import.search(lowercase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 12 ):
objects.append(line[12:-2] )
line_index += 1
A : Dict =objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
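# parse_init thus returns a pair of dicts, each mapping a backend key ('none',
# 'torch', 'tf_and_torch', ...) to the list of object names registered under it:
# the first dict comes from _import_structure, the second from the TYPE_CHECKING
# block. analyze_results below compares the two halves key by key.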
def A__ ( lowercase: Any, lowercase: int ) -> Dict:
def find_duplicates(lowercase: List[str] ):
return [k for k, v in collections.Counter(lowercase ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
A : List[Any] =[]
for key in import_dict_objects.keys():
A : List[Any] =find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F'Duplicate _import_structure definitions for: {duplicate_imports}' )
A : Tuple =find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
A : Tuple ='base imports' if key == 'none' else F'{key} backend'
errors.append(F'Differences for {name}:' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F' {a} in TYPE_HINT but not in _import_structure.' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F' {a} in _import_structure but not in TYPE_HINT.' )
return errors
def A__ ( ) -> List[str]:
A : Dict =[]
for root, _, files in os.walk(lowercase ):
if "__init__.py" in files:
A : Any =os.path.join(lowercase, '__init__.py' )
A : Union[str, Any] =parse_init(lowercase )
if objects is not None:
A : str =analyze_results(*lowercase )
if len(lowercase ) > 0:
A : Any =F'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'
failures.append('\n'.join(lowercase ) )
if len(lowercase ) > 0:
raise ValueError('\n\n'.join(lowercase ) )
def A__ ( ) -> int:
A : List[str] =[]
for path, directories, files in os.walk(lowercase ):
for folder in directories:
# Ignore private modules
if folder.startswith('_' ):
directories.remove(lowercase )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(lowercase ) / folder).glob('*.py' ) ) ) == 0:
continue
A : Any =str((Path(lowercase ) / folder).relative_to(lowercase ) )
A : List[str] =short_path.replace(os.path.sep, '.' )
submodules.append(lowercase )
for fname in files:
if fname == "__init__.py":
continue
A : Optional[Any] =str((Path(lowercase ) / fname).relative_to(lowercase ) )
A : Dict =short_path.replace('.py', '' ).replace(os.path.sep, '.' )
if len(submodule.split('.' ) ) == 1:
submodules.append(lowercase )
return submodules
_lowercase : Tuple =[
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
]
def A__ ( ) -> Tuple:
# This is to make sure the transformers module imported is the one in the repo.
A : str =importlib.util.spec_from_file_location(
'transformers', os.path.join(lowercase, '__init__.py' ), submodule_search_locations=[PATH_TO_TRANSFORMERS], )
A : Any =spec.loader.load_module()
A : Any =[
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(lowercase ) > 0:
A : Dict ='\n'.join(F'- {module}' for module in module_not_registered )
raise ValueError(
'The following submodules are not properly registered in the main init of Transformers:\n'
F'{list_of_modules}\n'
'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 661 | 0 |
from __future__ import annotations
import time
import numpy as np
_lowercase : Optional[Any] =[8, 5, 9, 7]
_lowercase : List[str] =[
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
_lowercase : Any =[
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int , ) -> Tuple:
A : List[Any] =claim_vector
A : List[str] =allocated_resources_table
A : Any =maximum_claim_table
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Tuple:
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Tuple:
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Tuple:
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(_A ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def SCREAMING_SNAKE_CASE_ ( self : int ) -> Union[str, Any]:
return {self.__need().index(_A ): i for i in self.__need()}
def SCREAMING_SNAKE_CASE_ ( self : int , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> int:
A : Any =self.__need()
A : Union[str, Any] =self.__allocated_resources_table
A : Optional[Any] =self.__available_resources()
A : Union[str, Any] =self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print('_' * 50 + '\n' )
while need_list:
A : int =False
for each_need in need_list:
A : Optional[int] =True
for index, need in enumerate(_A ):
if need > available_resources[index]:
A : Optional[Any] =False
break
if execution:
A : List[str] =True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
A : str =original_need_index
print(f'Process {process_number + 1} is executing.' )
# remove the process run from stack
need_list.remove(_A )
# update available/freed resources stack
A : Any =np.array(_A ) + np.array(
alloc_resources_table[process_number] )
print(
'Updated available resource stack for processes: '
+ ' '.join([str(_A ) for x in available_resources] ) )
break
if safe:
print('The process is in a safe state.\n' )
else:
print('System in unsafe state. Aborting...\n' )
break
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> str:
print(' ' * 9 + 'Allocated Resource Table' )
for item in self.__allocated_resources_table:
print(
f'P{self.__allocated_resources_table.index(_A ) + 1}'
+ ' '.join(f'{it:>8}' for it in item )
+ '\n' )
print(' ' * 9 + 'System Resource Table' )
for item in self.__maximum_claim_table:
print(
f'P{self.__maximum_claim_table.index(_A ) + 1}'
+ ' '.join(f'{it:>8}' for it in item )
+ '\n' )
print(
'Current Usage by Active Processes: '
+ ' '.join(str(_A ) for x in self.__claim_vector ) )
print(
'Initial Available Resources: '
+ ' '.join(str(_A ) for x in self.__available_resources() ) )
time.sleep(1 )
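# Illustrative usage (the original class and method names were obscured above; in
# the upstream source this is BankersAlgorithm.main): instantiate with the claim
# vector and the two tables, then call the safety-check method with describe=True
# to print the tables before simulating execution, e.g.
#   BankersAlgorithm(claim_vector, allocated_resources_table, maximum_claim_table).main(describe=True)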
if __name__ == "__main__":
import doctest
doctest.testmod()
| 711 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
_lowercase : Any =logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
lowercase : Optional[float] = field(
default=0.0 , metadata={"help": "The label smoothing epsilon to apply (if not zero)."} )
lowercase : bool = field(default=lowerCAmelCase_ , metadata={"help": "Whether to SortishSamler or not."} )
lowercase : bool = field(
default=lowerCAmelCase_ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
lowercase : bool = field(default=lowerCAmelCase_ , metadata={"help": "whether to use adafactor"} )
lowercase : Optional[float] = field(
default=lowerCAmelCase_ , metadata={"help": "Encoder layer dropout probability. Goes into model.config."} )
lowercase : Optional[float] = field(
default=lowerCAmelCase_ , metadata={"help": "Decoder layer dropout probability. Goes into model.config."} )
lowercase : Optional[float] = field(default=lowerCAmelCase_ , metadata={"help": "Dropout probability. Goes into model.config."} )
lowercase : Optional[float] = field(
default=lowerCAmelCase_ , metadata={"help": "Attention dropout probability. Goes into model.config."} )
lowercase : Optional[str] = field(
default="linear" , metadata={"help": f'Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'} , )
| 661 | 0 |
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
_lowercase : List[str] =['''small''', '''medium''', '''large''']
_lowercase : Optional[int] ='''lm_head.decoder.weight'''
_lowercase : Optional[Any] ='''lm_head.weight'''
def A__ ( lowercase: int, lowercase: Dict ) -> int:
A : int =torch.load(__snake_case )
A : Optional[Any] =d.pop(__snake_case )
os.makedirs(__snake_case, exist_ok=__snake_case )
torch.save(__snake_case, os.path.join(__snake_case, __snake_case ) )
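# Descriptive note (not in the original): upstream, the popped tensor under the
# fairseq-style 'lm_head.decoder.weight' key is reassigned to 'lm_head.weight',
# the key the Hugging Face GPT-2 head expects, and the state dict is then saved
# under the standard WEIGHTS_NAME inside the dump folder.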
if __name__ == "__main__":
_lowercase : Dict =argparse.ArgumentParser()
parser.add_argument('''--dialogpt_path''', default='''.''', type=str)
_lowercase : Optional[Any] =parser.parse_args()
for MODEL in DIALOGPT_MODELS:
_lowercase : Tuple =os.path.join(args.dialogpt_path, f'''{MODEL}_ft.pkl''')
_lowercase : Any =f'''./DialoGPT-{MODEL}'''
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 712 |
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
_lowercase : int =2
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
def __init__( self : List[Any] , *, # begin keyword-only arguments
SCREAMING_SNAKE_CASE__ : List[Any]="<s>" , SCREAMING_SNAKE_CASE__ : Optional[int]="<pad>" , SCREAMING_SNAKE_CASE__ : List[str]="</s>" , SCREAMING_SNAKE_CASE__ : Optional[Any]="<unk>" , SCREAMING_SNAKE_CASE__ : int=None , ) -> List[Any]:
A , A , A , A : Optional[Any] =bos, unk, pad, eos
A : Dict =[]
A : Union[str, Any] =[]
A : Any ={}
A : int =self.add_symbol(SCREAMING_SNAKE_CASE__ )
A : Any =self.add_symbol(SCREAMING_SNAKE_CASE__ )
A : List[Any] =self.add_symbol(SCREAMING_SNAKE_CASE__ )
A : List[str] =self.add_symbol(SCREAMING_SNAKE_CASE__ )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(SCREAMING_SNAKE_CASE__ )
A : List[str] =len(self.symbols )
def __eq__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> str:
return self.indices == other.indices
def __getitem__( self : int , SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[Any]:
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self : List[Any] ) -> Union[str, Any]:
return len(self.symbols )
def __contains__( self : Dict , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Tuple:
return sym in self.indices
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , SCREAMING_SNAKE_CASE__ : int ) -> Any:
A : Union[str, Any] =cls()
d.add_from_file(SCREAMING_SNAKE_CASE__ )
return d
def SCREAMING_SNAKE_CASE_ ( self : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any=1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=False ) -> Any:
if word in self.indices and not overwrite:
A : int =self.indices[word]
A : Union[str, Any] =self.count[idx] + n
return idx
else:
A : Tuple =len(self.symbols )
A : str =idx
self.symbols.append(SCREAMING_SNAKE_CASE__ )
self.count.append(SCREAMING_SNAKE_CASE__ )
return idx
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[Any]:
return 0
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : List[str] ) -> Optional[Any]:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
try:
with open(SCREAMING_SNAKE_CASE__ , 'r' , encoding='utf-8' ) as fd:
self.add_from_file(SCREAMING_SNAKE_CASE__ )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(SCREAMING_SNAKE_CASE__ ) )
return
A : str =f.readlines()
A : int =self._load_meta(SCREAMING_SNAKE_CASE__ )
for line in lines[indices_start_line:]:
try:
A , A : Optional[int] =line.rstrip().rsplit(' ' , 1 )
if field == "#fairseq:overwrite":
A : int =True
A , A : Optional[Any] =line.rsplit(' ' , 1 )
else:
A : Any =False
A : Tuple =int(SCREAMING_SNAKE_CASE__ )
A : Optional[int] =line
if word in self and not overwrite:
raise RuntimeError(
'Duplicate word found when loading Dictionary: \'{}\'. '
'Duplicate words can overwrite earlier ones by adding the '
'#fairseq:overwrite flag at the end of the corresponding row '
'in the dictionary file. If using the Camembert model, please '
'download an updated copy of the model file.'.format(SCREAMING_SNAKE_CASE__ ) )
self.add_symbol(SCREAMING_SNAKE_CASE__ , n=SCREAMING_SNAKE_CASE__ , overwrite=SCREAMING_SNAKE_CASE__ )
except ValueError:
raise ValueError('Incorrect dictionary format, expected \'<token> <cnt> [flags]\'' )
def A__ ( lowercase: Union[str, Any] ) -> str:
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
A : int =dict((re.sub(r'@@$', '', lowercase ), v) if k.endswith('@@' ) else (re.sub(r'$', '</w>', lowercase ), v) for k, v in d.items() )
A : int ='<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[F'{k}</w>']
A : List[Any] =d[k] # restore
return da
def A__ ( lowercase: Optional[int], lowercase: Optional[Any] ) -> str:
# prep
if not os.path.exists(lowercase ):
raise ValueError(F'path {biogpt_checkpoint_path} does not exist!' )
os.makedirs(lowercase, exist_ok=lowercase )
print(F'Writing results to {pytorch_dump_folder_path}' )
# handle various types of models
A : List[str] =os.path.join(lowercase, 'checkpoint.pt' )
if not os.path.isfile(lowercase ):
raise ValueError(F'path to the file {checkpoint_file} does not exist!' )
A : Optional[Any] =torch.load(lowercase, map_location='cpu' )
A : Any =chkpt['cfg']['model']
# dicts
A : Any =os.path.join(lowercase, 'dict.txt' )
if not os.path.isfile(lowercase ):
raise ValueError(F'path to the file {dict_file} does not exist!' )
A : Dict =Dictionary.load(lowercase )
A : Optional[Any] =rewrite_dict_keys(src_dict.indices )
A : Tuple =len(lowercase )
A : Any =os.path.join(lowercase, VOCAB_FILES_NAMES['vocab_file'] )
print(F'Generating {src_vocab_file} of {src_vocab_size} records' )
with open(lowercase, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(lowercase, ensure_ascii=lowercase, indent=lowercase ) )
# merges_file (bpecodes)
A : List[str] =os.path.join(lowercase, 'bpecodes' )
if not os.path.isfile(lowercase ):
raise ValueError(F'path to the file {bpecodes_file} does not exist!' )
A : List[str] =os.path.join(lowercase, VOCAB_FILES_NAMES['merges_file'] )
shutil.copyfile(lowercase, lowercase )
# model config
A : Tuple =os.path.join(lowercase, 'config.json' )
A : Tuple ={
'activation_dropout': args['activation_dropout'],
'architectures': ['BioGptForCausalLM'],
'attention_probs_dropout_prob': args['attention_dropout'],
'bos_token_id': 0,
'eos_token_id': 2,
'hidden_act': args['activation_fn'],
'hidden_dropout_prob': args['dropout'],
'hidden_size': args['decoder_embed_dim'],
'initializer_range': 0.02,
'intermediate_size': args['decoder_ffn_embed_dim'],
'layer_norm_eps': 1e-1_2,
'layerdrop': args['decoder_layerdrop'],
'max_position_embeddings': args['max_target_positions'],
'model_type': 'biogpt',
'num_attention_heads': args['decoder_attention_heads'],
'num_hidden_layers': args['decoder_layers'],
'pad_token_id': 1,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_decoder_input_output_embed'],
'vocab_size': src_vocab_size,
}
# good hparam defaults to start with
print(F'Generating {biogpt_model_config_file}' )
with open(lowercase, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(lowercase, ensure_ascii=lowercase, indent=lowercase ) )
# tokenizer config
A : int =os.path.join(lowercase, lowercase )
A : List[str] ={
'bos_token': '<s>',
'eos_token': '</s>',
'model_max_length': 1_024,
'pad_token': '<pad>',
'special_tokens_map_file': None,
'tokenizer_class': 'BioGptTokenizer',
'unk_token': '<unk>',
}
print(F'Generating {biogpt_tokenizer_config_file}' )
with open(lowercase, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(lowercase, ensure_ascii=lowercase, indent=lowercase ) )
# model
A : List[Any] =chkpt['model']
# remove unneeded keys
A : List[Any] =[
'decoder.version',
]
for k in ignore_keys:
model_state_dict.pop(lowercase, lowercase )
A : str =list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith('output_projection.weight' ):
A : Union[str, Any] =model_state_dict.pop(lowercase )
else:
A : List[str] =model_state_dict.pop(lowercase )
A : Any =BioGptConfig.from_pretrained(lowercase )
A : str =BioGptForCausalLM(lowercase )
# check that it loads ok
model_new.load_state_dict(lowercase )
# save
A : Tuple =os.path.join(lowercase, lowercase )
print(F'Generating {pytorch_weights_dump_path}' )
torch.save(lowercase, lowercase )
print('Conversion is done!' )
if __name__ == "__main__":
_lowercase : Union[str, Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--biogpt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_lowercase : List[Any] =parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 661 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase : Optional[Any] ={
'''configuration_blip_2''': [
'''BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Blip2Config''',
'''Blip2QFormerConfig''',
'''Blip2VisionConfig''',
],
'''processing_blip_2''': ['''Blip2Processor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str =[
'''BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Blip2Model''',
'''Blip2QFormerModel''',
'''Blip2PreTrainedModel''',
'''Blip2ForConditionalGeneration''',
'''Blip2VisionModel''',
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
_lowercase : Optional[Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
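# Descriptive note (not in the original): the _LazyModule above defers the heavy
# torch-dependent imports until an attribute such as Blip2ForConditionalGeneration
# is first accessed, while the TYPE_CHECKING branch keeps static type checkers and
# IDEs aware of the full module surface.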
| 713 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
_lowercase : str =False
_lowercase : Optional[Any] =False
def A__ ( lowercase: Namespace ) -> Optional[int]:
return TrainCommand(lowercase )
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
@staticmethod
def SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ : ArgumentParser ) -> Dict:
A : Optional[Any] =parser.add_parser('train' , help='CLI tool to train a model on a task.' )
train_parser.add_argument(
'--train_data' , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' , )
train_parser.add_argument(
'--column_label' , type=SCREAMING_SNAKE_CASE__ , default=0 , help='Column of the dataset csv file with example labels.' )
train_parser.add_argument(
'--column_text' , type=SCREAMING_SNAKE_CASE__ , default=1 , help='Column of the dataset csv file with example texts.' )
train_parser.add_argument(
'--column_id' , type=SCREAMING_SNAKE_CASE__ , default=2 , help='Column of the dataset csv file with example ids.' )
train_parser.add_argument(
'--skip_first_row' , action='store_true' , help='Skip the first row of the csv file (headers).' )
train_parser.add_argument('--validation_data' , type=SCREAMING_SNAKE_CASE__ , default='' , help='path to validation dataset.' )
train_parser.add_argument(
'--validation_split' , type=SCREAMING_SNAKE_CASE__ , default=0.1 , help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' , )
train_parser.add_argument('--output' , type=SCREAMING_SNAKE_CASE__ , default='./' , help='path to save the trained model.' )
train_parser.add_argument(
'--task' , type=SCREAMING_SNAKE_CASE__ , default='text_classification' , help='Task to train the model on.' )
train_parser.add_argument(
'--model' , type=SCREAMING_SNAKE_CASE__ , default='bert-base-uncased' , help='Model\'s name or path to stored model.' )
train_parser.add_argument('--train_batch_size' , type=SCREAMING_SNAKE_CASE__ , default=32 , help='Batch size for training.' )
train_parser.add_argument('--valid_batch_size' , type=SCREAMING_SNAKE_CASE__ , default=64 , help='Batch size for validation.' )
train_parser.add_argument('--learning_rate' , type=SCREAMING_SNAKE_CASE__ , default=3e-5 , help='Learning rate.' )
train_parser.add_argument('--adam_epsilon' , type=SCREAMING_SNAKE_CASE__ , default=1e-08 , help='Epsilon for Adam optimizer.' )
train_parser.set_defaults(func=SCREAMING_SNAKE_CASE__ )
    def __init__( self , args: Namespace ) -> None:
        self.logger = logging.get_logger('transformers-cli/training' )
        self.framework = 'tf' if is_tf_available() else 'torch'
        os.makedirs(args.output , exist_ok=True )
        self.output = args.output
        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id
        self.logger.info(f'Loading {args.task} pipeline for {args.model}' )
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model )
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(f'Loading dataset from {args.train_data}' )
        self.train_dataset = Processor.create_from_csv(
            args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f'Loading validation dataset from {args.validation_data}' )
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon

    def run( self ):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch( self ):
        raise NotImplementedError

    def run_tf( self ):
        self.pipeline.fit(
            self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
        # Save trained pipeline
        self.pipeline.save_pretrained(self.output )
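# Illustrative CLI invocation (a sketch, assuming the `transformers-cli` entry
# point wires this command in via register_subcommand above):
#
#   transformers-cli train --train_data train.csv --column_label 0 \
#       --column_text 1 --output ./trained_model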
| 661 | 0 |
def counting_sort( collection: list ) -> list:
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection )
    coll_max = max(collection )
    coll_min = min(collection )
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how many times each number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i there are in the collection
    for i in range(1, counting_arr_length ):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len ) ):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered
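# Worked example (illustrative): counting_sort([4, 1, 3, 1]) spans the range
# 1..4, so counting_arr starts as [2, 0, 1, 1]; the prefix-sum pass turns it
# into [2, 2, 3, 4], and the final placement pass yields [1, 1, 3, 4].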
def counting_sort_string( string: str ) -> str:
    return "".join([chr(i ) for i in counting_sort([ord(c ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string('''thisisthestring''') == "eghhiiinrsssttt"
_lowercase : int =input('''Enter numbers separated by a comma:\n''').strip()
_lowercase : Tuple =[int(item) for item in user_input.split(''',''')]
print(counting_sort(unsorted))
| 714 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester ( unittest.TestCase ):
'''simple docstring'''
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=4_00 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_rescale=True , rescale_factor=1 / 2_55 , do_pad=True , ) -> None:
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 13_33}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values( self , image_inputs , batched=False ):
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w )
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h )
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
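# Illustrative check: with size={'shortest_edge': 18, 'longest_edge': 1333} and
# a 30x400 PIL input, the helper above scales the short side to 18 and the long
# side proportionally, mirroring the processor's aspect-preserving resize; in
# batched mode it takes the per-image maxima, matching the padded batch shape.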
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE_ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
'''simple docstring'''
lowercase : List[Any] = ConditionalDetrImageProcessor if is_vision_available() else None
    def setUp( self ) -> None:
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self )
@property
    def image_processor_dict( self ):
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ) -> None:
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , 'image_mean' ) )
        self.assertTrue(hasattr(image_processing , 'image_std' ) )
        self.assertTrue(hasattr(image_processing , 'do_normalize' ) )
        self.assertTrue(hasattr(image_processing , 'do_resize' ) )
        self.assertTrue(hasattr(image_processing , 'size' ) )
    def test_image_processor_from_dict_with_kwargs( self ) -> None:
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 13_33} )
        self.assertEqual(image_processor.do_pad , True )
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=False )
        self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
        self.assertEqual(image_processor.do_pad , False )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Optional[int]:
pass
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Union[str, Any]:
# Initialize image_processing
A : Union[str, Any] =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A : Tuple =prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
# Test not batched input
A : List[Any] =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
A , A : List[str] =self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A , A : Union[str, Any] =self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ )
A : Union[str, Any] =image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> int:
# Initialize image_processing
A : Optional[Any] =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A : str =prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , numpify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , np.ndarray )
# Test not batched input
A : Tuple =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
A , A : Any =self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A : Tuple =image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values
A , A : Optional[int] =self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> List[str]:
# Initialize image_processing
A : Optional[Any] =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A : Any =prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , torchify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , torch.Tensor )
# Test not batched input
A : Optional[int] =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
A , A : Tuple =self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A : Tuple =image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values
A , A : int =self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Union[str, Any]:
# prepare image and target
A : Union[str, Any] =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
A : List[Any] =json.loads(f.read() )
A : Any ={'image_id': 3_97_69, 'annotations': target}
# encode them
A : str =ConditionalDetrImageProcessor.from_pretrained('microsoft/conditional-detr-resnet-50' )
A : Any =image_processing(images=SCREAMING_SNAKE_CASE__ , annotations=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
# verify pixel values
A : Optional[Any] =torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , SCREAMING_SNAKE_CASE__ )
A : List[str] =torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
# verify area
A : Dict =torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , SCREAMING_SNAKE_CASE__ ) )
# verify boxes
A : str =torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , SCREAMING_SNAKE_CASE__ )
A : Union[str, Any] =torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , SCREAMING_SNAKE_CASE__ , atol=1e-3 ) )
# verify image_id
A : Dict =torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , SCREAMING_SNAKE_CASE__ ) )
# verify is_crowd
A : List[str] =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , SCREAMING_SNAKE_CASE__ ) )
# verify class_labels
A : Union[str, Any] =torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , SCREAMING_SNAKE_CASE__ ) )
# verify orig_size
A : List[Any] =torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , SCREAMING_SNAKE_CASE__ ) )
# verify size
A : int =torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , SCREAMING_SNAKE_CASE__ ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Tuple:
# prepare image, target and masks_path
A : List[str] =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
A : Optional[int] =json.loads(f.read() )
A : int ={'file_name': '000000039769.png', 'image_id': 3_97_69, 'segments_info': target}
A : Optional[Any] =pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
A : List[Any] =ConditionalDetrImageProcessor(format='coco_panoptic' )
A : Union[str, Any] =image_processing(images=SCREAMING_SNAKE_CASE__ , annotations=SCREAMING_SNAKE_CASE__ , masks_path=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
# verify pixel values
A : Dict =torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , SCREAMING_SNAKE_CASE__ )
A : Dict =torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
# verify area
A : Optional[int] =torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , SCREAMING_SNAKE_CASE__ ) )
# verify boxes
A : List[Any] =torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , SCREAMING_SNAKE_CASE__ )
A : Any =torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , SCREAMING_SNAKE_CASE__ , atol=1e-3 ) )
# verify image_id
A : List[Any] =torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , SCREAMING_SNAKE_CASE__ ) )
# verify is_crowd
A : Any =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , SCREAMING_SNAKE_CASE__ ) )
# verify class_labels
A : str =torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , SCREAMING_SNAKE_CASE__ ) )
# verify masks
A : int =82_28_73
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , SCREAMING_SNAKE_CASE__ )
# verify orig_size
A : Any =torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , SCREAMING_SNAKE_CASE__ ) )
# verify size
A : str =torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , SCREAMING_SNAKE_CASE__ ) )
| 661 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
_lowercase : Optional[Any] =logging.get_logger(__name__)
_lowercase : str ={'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_lowercase : Optional[Any] ={
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
_lowercase : List[str] ={
'''roberta-base''': 5_1_2,
'''roberta-large''': 5_1_2,
'''roberta-large-mnli''': 5_1_2,
'''distilroberta-base''': 5_1_2,
'''roberta-base-openai-detector''': 5_1_2,
'''roberta-large-openai-detector''': 5_1_2,
}
class SCREAMING_SNAKE_CASE_ ( PreTrainedTokenizerFast ):
'''simple docstring'''
lowercase : List[str] = VOCAB_FILES_NAMES
lowercase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : Union[str, Any] = ["input_ids", "attention_mask"]
lowercase : Optional[Any] = RobertaTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ) -> None:
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('type' ) )
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'] )
            if "cls" in state:
                state['cls'] = tuple(state['cls'] )
            changes_to_apply = False
            if state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets' , trim_offsets ) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop('type' ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
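    # Illustrative note: the block above rebuilds the serialized post-processor
    # (e.g. RobertaProcessing) whenever the requested `add_prefix_space` or
    # `trim_offsets` disagrees with the saved tokenizer.json state, so offset
    # mappings match the constructor arguments.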
    @property
    def mask_token( self ) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.' )
            return None
        return str(self._mask_token )

    @mask_token.setter
    def mask_token( self , value ) -> None:
        # The mask token behaves like a normal word, i.e. includes the space before it
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _batch_encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )

    def _encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
| 715 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_lowercase : List[Any] =1_6
_lowercase : Union[str, Any] =3_2
def get_dataloaders( accelerator: Accelerator , batch_size: int = 16 , model_name_or_path: str = "bert-base-cased" ):
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path )
    datasets = load_dataset('glue' , 'mrpc' )

    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels' )

    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding='max_length' , max_length=128 , return_tensors='pt' )
        return tokenizer.pad(examples , padding='longest' , return_tensors='pt' )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
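# Illustrative note: the TPU branch in collate_fn pads every batch to a fixed
# max_length=128 because dynamic ("longest") padding changes tensor shapes per
# batch and would retrigger XLA recompilation on every new shape.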
def evaluation_loop( accelerator , model , eval_dataloader , metric ):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader ):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device )
        with torch.no_grad():
            outputs = model(**batch )
        predictions = outputs.logits.argmax(dim=-1 )
        # It is slightly faster to call this once, than multiple times
        predictions , references = accelerator.gather(
            (predictions, batch['labels']) )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader ) - 1:
                predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                references = references[: len(eval_dataloader.dataset ) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions , references=references , )
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
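# Illustrative note: accelerator.gather requires every process to contribute
# equally sized tensors, so the final batch can contain duplicated samples;
# the use_distributed branch above trims those duplicates so each validation
# example is counted exactly once by the metric.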
def training_function( config , args ) -> None:
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'] )
    seed = int(config['seed'] )
    batch_size = int(config['batch_size'] )
    model_name_or_path = args.model_name_or_path
    set_seed(seed )
    train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size , model_name_or_path )
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path , return_dict=True )
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            'gradient_accumulation_steps'
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load('glue' , 'mrpc' )
    ending_epoch = num_epochs
    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch
    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint )
        epoch_string = args.resume_from_checkpoint.split('epoch_' )[1]
        state_epoch_num = ''
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num ) + 1
        accuracy = evaluation_loop(accelerator , model , eval_dataloader , metric )
        accelerator.print('resumed checkpoint performance:' , accuracy )
        accelerator.print('resumed checkpoint\'s scheduler\'s lr:' , lr_scheduler.get_lr()[0] )
        accelerator.print('resumed optimizers\'s lr:' , optimizer.param_groups[0]['lr'] )
        with open(os.path.join(args.output_dir , f'state_{starting_epoch-1}.json' ) , 'r' ) as f:
            resumed_state = json.load(f )
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return
    # Now we train the model
    state = {}
    for epoch in range(starting_epoch , ending_epoch ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        output_dir = f'epoch_{epoch}'
        output_dir = os.path.join(args.output_dir , output_dir )
        accelerator.save_state(output_dir )
        accuracy = evaluation_loop(accelerator , model , eval_dataloader , metric )
        state['accuracy'] = accuracy
        state['lr'] = lr_scheduler.get_lr()[0]
        state['optimizer_lr'] = optimizer.param_groups[0]['lr']
        state['epoch'] = epoch
        state['overall_step'] = overall_step
        accelerator.print(f'epoch {epoch}:' , state )
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir , f'state_{epoch}.json' ) , 'w' ) as f:
                json.dump(state , f )
def main() -> None:
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
    parser.add_argument(
        '--model_name_or_path' , type=str , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=False , )
    parser.add_argument(
        '--output_dir' , type=str , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
    parser.add_argument(
        '--resume_from_checkpoint' , type=str , default=None , help='If the training should continue from a checkpoint folder.' , )
    parser.add_argument(
        '--partial_train_epoch' , type=int , default=None , help='If passed, the training will stop after this number of epochs.' , )
    parser.add_argument(
        '--num_epochs' , type=int , default=2 , help='Number of train epochs.' , )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 661 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_lowercase : Tuple ={
'''configuration_ernie''': ['''ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ErnieConfig''', '''ErnieOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Tuple =[
'''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ErnieForCausalLM''',
'''ErnieForMaskedLM''',
'''ErnieForMultipleChoice''',
'''ErnieForNextSentencePrediction''',
'''ErnieForPreTraining''',
'''ErnieForQuestionAnswering''',
'''ErnieForSequenceClassification''',
'''ErnieForTokenClassification''',
'''ErnieModel''',
'''ErniePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
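# Illustrative note: replacing the module in sys.modules with a _LazyModule
# defers the heavy modeling imports until an attribute is first accessed; the
# TYPE_CHECKING branch above still gives static type checkers the real symbols.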
| 716 |
def A__ ( number: int ) -> int:
    if not isinstance(number , int ) or number < 0:
        raise ValueError('Input must be a non-negative integer' )
    count = 0
while number:
# This way we arrive at next set bit (next 1) instead of looping
# through each bit and checking for 1s hence the
# loop won't run 32 times it will only run the number of `1` times
number &= number - 1
count += 1
return count
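# Example trace (illustrative): for number = 0b1011 (11), `number &= number - 1`
# clears the lowest set bit on each pass: 0b1011 -> 0b1010 -> 0b1000 -> 0, so
# the loop runs 3 times and returns 3.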
if __name__ == "__main__":
import doctest
doctest.testmod()
| 661 | 0 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class SCREAMING_SNAKE_CASE_ ( PipelineKarrasSchedulerTesterMixin , PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
lowercase : Tuple = StableUnCLIPPipeline
lowercase : Tuple = TEXT_TO_IMAGE_PARAMS
lowercase : Any = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
lowercase : Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
lowercase : Dict = False
    def get_dummy_components( self ):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
# prior components
torch.manual_seed(0 )
A : str =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
A : List[Any] =CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCAmelCase_ , projection_dim=lowerCAmelCase_ , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
A : Any =PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=lowerCAmelCase_ , num_layers=1 , )
torch.manual_seed(0 )
A : Any =DDPMScheduler(
variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=10_00 , clip_sample=lowerCAmelCase_ , clip_sample_range=5.0 , beta_schedule='squaredcos_cap_v2' , )
# regular denoising components
torch.manual_seed(0 )
A : List[str] =StableUnCLIPImageNormalizer(embedding_dim=lowerCAmelCase_ )
A : Tuple =DDPMScheduler(beta_schedule='squaredcos_cap_v2' )
torch.manual_seed(0 )
A : Tuple =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
A : int =CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCAmelCase_ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
A : Optional[Any] =UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCAmelCase_ , layers_per_block=1 , upcast_attention=lowerCAmelCase_ , use_linear_projection=lowerCAmelCase_ , )
torch.manual_seed(0 )
A : Tuple =DDIMScheduler(
beta_schedule='scaled_linear' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , prediction_type='v_prediction' , set_alpha_to_one=lowerCAmelCase_ , steps_offset=1 , )
torch.manual_seed(0 )
A : Optional[Any] =AutoencoderKL()
A : Optional[int] ={
# prior components
'prior_tokenizer': prior_tokenizer,
'prior_text_encoder': prior_text_encoder,
'prior': prior,
'prior_scheduler': prior_scheduler,
# image noising components
'image_normalizer': image_normalizer,
'image_noising_scheduler': image_noising_scheduler,
# regular denoising components
'tokenizer': tokenizer,
'text_encoder': text_encoder,
'unet': unet,
'scheduler': scheduler,
'vae': vae,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'prior_num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
    def test_attention_slicing_forward_pass( self ) -> None:
        test_max_difference = torch_device == 'cpu'
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference )

    def test_inference_batch_single_identical( self ) -> None:
        test_max_difference = torch_device in ['cpu', 'mps']
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
    def tearDown( self ) -> None:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_unclip( self ) -> None:
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy' )
        pipe = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' , torch_dtype=torch.floataa )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        output = pipe('anime turle' , generator=generator , output_type='np' )
        image = output.images[0]
        assert image.shape == (7_68, 7_68, 3)
        assert_mean_pixel_difference(image , expected_image )
    def test_stable_unclip_pipeline_with_sequential_cpu_offloading( self ) -> None:
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' , torch_dtype=torch.floataa )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
        _ = pipe(
            'anime turtle' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='np' , )
        mem_bytes = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 717 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def A__ ( *args , take_from: Optional[Union[Dict, Any]] = None , standard_warn: bool = True , stacklevel: int = 2 ) -> Any:
    from .. import __version__
    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0] , tuple ):
        args = (args,)
    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__ ).base_version ) >= version.parse(version_name ):
            raise ValueError(
                f'The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''
                f' version {__version__} is >= {version_name}' )
        warning = None
        if isinstance(deprecated_kwargs , dict ) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute ),)
            warning = f'The `{attribute}` argument is deprecated and will be removed in version {version_name}.'
        elif hasattr(deprecated_kwargs , attribute ):
            values += (getattr(deprecated_kwargs , attribute ),)
            warning = f'The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'
        elif deprecated_kwargs is None:
            warning = f'`{attribute}` is deprecated and will be removed in version {version_name}.'
        if warning is not None:
            warning = warning + ' ' if standard_warn else ''
            warnings.warn(warning + message , FutureWarning , stacklevel=stacklevel )
    if isinstance(deprecated_kwargs , dict ) and len(deprecated_kwargs ) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe() )[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key , value = next(iter(deprecated_kwargs.items() ) )
        raise TypeError(f'{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`' )
    if len(values ) == 0:
        return
    elif len(values ) == 1:
        return values[0]
    return values
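# Illustrative usage (a sketch, assuming this helper mirrors diffusers'
# `deprecate` utility): pulling a deprecated keyword argument out of **kwargs
# while emitting a FutureWarning:
#
#   def step(self, sample, **kwargs):
#       offset = A__('offset', '0.10.0', 'Use `steps_offset` instead.', take_from=kwargs)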
| 661 | 0 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class SCREAMING_SNAKE_CASE_ ( Dataset ):
'''simple docstring'''
    def __init__( self , params , data ) -> None:
        self.params = params
        self.token_ids = np.array(data )
        self.lengths = np.array([len(t ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
    def __getitem__( self , index ):
return (self.token_ids[index], self.lengths[index])
def __len__( self : Optional[Any] ) -> int:
return len(self.lengths )
    def check( self ) -> None:
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
    def remove_long_sequences( self ) -> None:
        max_len = self.params.max_model_input_size
        idxs = self.lengths > max_len
        logger.info(f'Splitting {sum(idxs )} too long sequences.' )

        def divide_chunks(l , n ):
            return [l[i : i + n] for i in range(0 , len(l ) , n )]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id , sep_id = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token']
        else:
            cls_id , sep_id = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token']
        for seq_, len_ in zip(self.token_ids , self.lengths ):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_ )
                new_lengths.append(len_ )
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_ , max_len - 2 ):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s , 0 , cls_id )
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s , len(sub_s ) , sep_id )
                    assert len(sub_s ) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s )
                new_tok_ids.extend(sub_seqs )
                new_lengths.extend([len(l ) for l in sub_seqs] )
        self.token_ids = np.array(new_tok_ids )
        self.lengths = np.array(new_lengths )
    def remove_empty_sequences( self ) -> None:
        init_size = len(self )
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self )
        logger.info(f'Remove {init_size - new_size} too short (<=11 tokens) sequences.' )
    def remove_unknown_sequences( self ) -> None:
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids['unk_token']
        init_size = len(self )
        unk_occs = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self )
        logger.info(f'Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).' )
    def print_statistics( self ) -> None:
if not self.params.is_master:
return
logger.info(f'{len(self )} sequences' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def batch_sequences( self , batch ):
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids ) == len(lengths )
        # Max for paddings
        max_seq_len_ = max(lengths )
        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids['pad_token']
        else:
            pad_idx = self.params.special_tok_ids['unk_token']
        tk_ = [list(t.astype(int ) ) + [pad_idx] * (max_seq_len_ - len(t )) for t in token_ids]
        assert len(tk_ ) == len(token_ids )
        assert all(len(t ) == max_seq_len_ for t in tk_ )
        tk_t = torch.tensor(tk_ )  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths )  # (bs)
        return tk_t, lg_t
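# Worked example (illustrative): batch_sequences([(ids_a, 3), (ids_b, 5)]) pads
# ids_a with two pad_idx tokens so both rows reach max_seq_len_ = 5, returning
# a (2, 5) token-id tensor and the original lengths tensor [3, 5].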
| 718 |
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset( dataset , expected_features ) -> None:
    assert isinstance(dataset , Dataset )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory', [False, True] )
def A__ ( lowercase: Dict, lowercase: Tuple, lowercase: str ) -> str:
A : Any =tmp_path / 'cache'
A : Dict ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A : Dict =JsonDatasetReader(lowercase, cache_dir=lowercase, keep_in_memory=lowercase ).read()
_check_json_dataset(lowercase, lowercase )
@pytest.mark.parametrize(
'features', [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
], )
def A__ ( lowercase: Optional[int], lowercase: Any, lowercase: Union[str, Any] ) -> Tuple:
A : Tuple =tmp_path / 'cache'
A : Optional[Any] ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A : Optional[Any] =features.copy() if features else default_expected_features
A : Union[str, Any] =(
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
A : str =JsonDatasetReader(lowercase, features=lowercase, cache_dir=lowercase ).read()
_check_json_dataset(lowercase, lowercase )
@pytest.mark.parametrize(
'features', [
None,
{'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'},
], )
def A__ ( lowercase: Optional[int], lowercase: str, lowercase: Dict ) -> Optional[int]:
A : int =tmp_path / 'cache'
A : Tuple ={'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'}
A : int =features.copy() if features else default_expected_features
A : str =(
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
A : Optional[int] =JsonDatasetReader(lowercase, features=lowercase, cache_dir=lowercase ).read()
assert isinstance(lowercase, lowercase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def A__ ( lowercase: Optional[Any], lowercase: str ) -> Tuple:
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
A : str ={'col_2': 'int64', 'col_3': 'float64', 'col_1': 'string'}
A : Dict =features.copy()
A : List[str] =(
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
A : int =tmp_path / 'cache'
A : Optional[int] =JsonDatasetReader(lowercase, features=lowercase, cache_dir=lowercase ).read()
assert isinstance(lowercase, lowercase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('split', [None, NamedSplit('train' ), 'train', 'test'] )
def A__ ( lowercase: Union[str, Any], lowercase: Any, lowercase: str ) -> Optional[Any]:
A : Optional[int] =tmp_path / 'cache'
A : Optional[Any] ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A : str =JsonDatasetReader(lowercase, cache_dir=lowercase, split=lowercase ).read()
_check_json_dataset(lowercase, lowercase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type', [str, list] )
def A__ ( lowercase: Optional[Any], lowercase: int, lowercase: Union[str, Any] ) -> List[Any]:
if issubclass(lowercase, lowercase ):
A : int =jsonl_path
elif issubclass(lowercase, lowercase ):
A : Any =[jsonl_path]
A : Optional[Any] =tmp_path / 'cache'
A : Tuple ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A : List[str] =JsonDatasetReader(lowercase, cache_dir=lowercase ).read()
_check_json_dataset(lowercase, lowercase )
def _check_json_datasetdict( dataset_dict , expected_features , splits=("train",) ) -> None:
    assert isinstance(dataset_dict , DatasetDict )
    for split in splits:
        dataset = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory', [False, True] )
def A__ ( lowercase: Tuple, lowercase: Optional[int], lowercase: Any ) -> str:
A : List[str] =tmp_path / 'cache'
A : Union[str, Any] ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A : str =JsonDatasetReader({'train': jsonl_path}, cache_dir=lowercase, keep_in_memory=lowercase ).read()
_check_json_datasetdict(lowercase, lowercase )
@pytest.mark.parametrize(
'features', [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
], )
def A__ ( lowercase: Optional[int], lowercase: Optional[int], lowercase: Optional[int] ) -> Tuple:
A : Any =tmp_path / 'cache'
A : List[str] ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A : str =features.copy() if features else default_expected_features
A : Dict =(
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
A : Optional[Any] =JsonDatasetReader({'train': jsonl_path}, features=lowercase, cache_dir=lowercase ).read()
_check_json_datasetdict(lowercase, lowercase )
@pytest.mark.parametrize('split', [None, NamedSplit('train' ), 'train', 'test'] )
def A__ ( lowercase: Any, lowercase: List[Any], lowercase: List[Any] ) -> Tuple:
if split:
A : Optional[int] ={split: jsonl_path}
else:
A : Dict ='train'
A : Optional[Any] ={'train': jsonl_path, 'test': jsonl_path}
A : Tuple =tmp_path / 'cache'
A : List[str] ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A : List[Any] =JsonDatasetReader(lowercase, cache_dir=lowercase ).read()
_check_json_datasetdict(lowercase, lowercase, splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def load_json( buffer ):
    return json.load(buffer )
def load_json_lines( buffer ):
    return [json.loads(line ) for line in buffer]
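# Illustrative note: with lines=True the writer emits JSON Lines (one object
# per line, parsed by load_json_lines); with lines=False it emits a single
# JSON document (parsed by load_json), as exercised by the parametrized
# round-trip tests below.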
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Any:
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , lines=SCREAMING_SNAKE_CASE__ ).write()
buffer.seek(0 )
A : int =load_json_function(SCREAMING_SNAKE_CASE__ )
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert isinstance(exported_content[0] , SCREAMING_SNAKE_CASE__ )
assert len(SCREAMING_SNAKE_CASE__ ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any ) -> Optional[Any]:
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , lines=SCREAMING_SNAKE_CASE__ , orient=SCREAMING_SNAKE_CASE__ ).write()
buffer.seek(0 )
A : Any =load_json(SCREAMING_SNAKE_CASE__ )
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(SCREAMING_SNAKE_CASE__ , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(SCREAMING_SNAKE_CASE__ ) == 10
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[int]:
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , lines=SCREAMING_SNAKE_CASE__ , num_proc=2 ).write()
buffer.seek(0 )
A : int =load_json_function(SCREAMING_SNAKE_CASE__ )
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert isinstance(exported_content[0] , SCREAMING_SNAKE_CASE__ )
assert len(SCREAMING_SNAKE_CASE__ ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[Any]:
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , lines=SCREAMING_SNAKE_CASE__ , orient=SCREAMING_SNAKE_CASE__ , num_proc=2 ).write()
buffer.seek(0 )
A : List[Any] =load_json(SCREAMING_SNAKE_CASE__ )
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(SCREAMING_SNAKE_CASE__ , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(SCREAMING_SNAKE_CASE__ ) == 10
def SCREAMING_SNAKE_CASE_ ( self : str , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> List[Any]:
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , num_proc=0 )
@pytest.mark.parametrize('compression, extension' , [('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')] )
def SCREAMING_SNAKE_CASE_ ( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict ) -> str:
A : Union[str, Any] =tmp_path_factory.mktemp('data' ) / f'test.json.{extension}'
A : Union[str, Any] =str(shared_datadir / f'test_file.json.{extension}' )
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , compression=SCREAMING_SNAKE_CASE__ ).write()
with fsspec.open(SCREAMING_SNAKE_CASE__ , 'rb' , compression='infer' ) as f:
A : str =f.read()
with fsspec.open(SCREAMING_SNAKE_CASE__ , 'rb' , compression='infer' ) as f:
A : List[str] =f.read()
assert exported_content == original_content
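# A minimal round-trip sketch mirroring the tests above (assumes `dataset` is a
# `datasets.Dataset`; `JsonDatasetWriter`/`JsonDatasetReader` are the classes
# under test, imported further up in this file):
#
#     with io.BytesIO() as buffer:
#         JsonDatasetWriter(dataset, buffer, lines=True).write()
#         buffer.seek(0)
#         rows = [json.loads(line) for line in buffer]  # one JSON object per line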
| 661 | 0 |
_lowercase : List[Any] =[4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
_lowercase : List[str] =[3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
_lowercase : Union[str, Any] ={
0: """Sunday""",
1: """Monday""",
2: """Tuesday""",
3: """Wednesday""",
4: """Thursday""",
5: """Friday""",
6: """Saturday""",
}
def A__ ( lowercase: Optional[Any], lowercase: Optional[Any], lowercase: Optional[Any] ) -> str:
assert len(str(lowercase ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 12, "month should be between 1 to 12"
assert 1 <= day <= 31, "day should be between 1 to 31"
# Doomsday algorithm:
A : Tuple =year // 100
A : Union[str, Any] =(5 * (century % 4) + 2) % 7
A : List[str] =year % 100
A : Union[str, Any] =centurian % 12
A : Dict =(
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
A : Any =(
DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
else DOOMSDAY_LEAP[month - 1]
)
A : List[str] =(dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
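# Worked examples, hand-checked against a calendar: for 2023-01-01 the year's
# doomsday is Tuesday (anchor index 2) and the formula yields "Sunday"; for
# 2020-10-24 (a leap year) it yields "Saturday".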
if __name__ == "__main__":
import doctest
doctest.testmod()
| 719 |
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
lowercase : Optional[int] = DDIMPipeline
lowercase : int = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
lowercase : Optional[Any] = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"latents",
"callback",
"callback_steps",
}
lowercase : Optional[Any] = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
lowercase : Union[str, Any] = False
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Optional[int]:
torch.manual_seed(0 )
A : str =UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
A : Optional[int] =DDIMScheduler()
A : Optional[Any] ={'unet': unet, 'scheduler': scheduler}
return components
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any]=0 ) -> Any:
if str(SCREAMING_SNAKE_CASE__ ).startswith('mps' ):
A : List[Any] =torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
A : Union[str, Any] =torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
A : Optional[int] ={
'batch_size': 1,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> List[Any]:
A : Union[str, Any] ='cpu'
A : Tuple =self.get_dummy_components()
A : Union[str, Any] =self.pipeline_class(**SCREAMING_SNAKE_CASE__ )
pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
A : str =self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
A : str =pipe(**SCREAMING_SNAKE_CASE__ ).images
A : Optional[Any] =image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
A : Optional[Any] =np.array(
[1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04] )
A : str =np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(SCREAMING_SNAKE_CASE__ , 1e-3 )
def SCREAMING_SNAKE_CASE_ ( self : int ) -> Dict:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> List[Any]:
super().test_save_load_local(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Tuple:
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Tuple:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Dict:
A : Any ='google/ddpm-cifar10-32'
A : Optional[int] =UNetaDModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : Tuple =DDIMScheduler()
A : int =DDIMPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
ddim.to(SCREAMING_SNAKE_CASE__ )
ddim.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
A : Dict =torch.manual_seed(0 )
A : Optional[Any] =ddim(generator=SCREAMING_SNAKE_CASE__ , eta=0.0 , output_type='numpy' ).images
A : str =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A : Tuple =np.array([0.1_7_2_3, 0.1_6_1_7, 0.1_6_0_0, 0.1_6_2_6, 0.1_4_9_7, 0.1_5_1_3, 0.1_5_0_5, 0.1_4_4_2, 0.1_4_5_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> int:
A : Optional[int] ='google/ddpm-ema-bedroom-256'
A : str =UNetaDModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : str =DDIMScheduler.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : Tuple =DDIMPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
ddpm.to(SCREAMING_SNAKE_CASE__ )
ddpm.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
A : Any =torch.manual_seed(0 )
A : Optional[int] =ddpm(generator=SCREAMING_SNAKE_CASE__ , output_type='numpy' ).images
A : List[Any] =image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
A : Optional[int] =np.array([0.0_0_6_0, 0.0_2_0_1, 0.0_3_4_4, 0.0_0_2_4, 0.0_0_1_8, 0.0_0_0_2, 0.0_0_2_2, 0.0_0_0_0, 0.0_0_6_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
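# A minimal standalone usage sketch mirroring the slow test above
# ('google/ddpm-cifar10-32' is the checkpoint the test loads; eta=0.0 selects
# fully deterministic DDIM sampling):
#
#     unet = UNetaDModel.from_pretrained('google/ddpm-cifar10-32')
#     ddim = DDIMPipeline(unet=unet, scheduler=DDIMScheduler())
#     image = ddim(generator=torch.manual_seed(0), eta=0.0, output_type='numpy').images[0]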
| 661 | 0 |
'''simple docstring'''
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def A__ ( lowercase: bool = True, *lowercase: Any, **lowercase: List[Any] ) -> List[str]:
if not is_tqdm_available():
raise ImportError('Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.' )
A : Any =False
if main_process_only:
        A : Union[str, Any] =PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable )
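# A hedged usage sketch (`dataloader` is a hypothetical iterable; with
# main_process_only=True the bar is disabled on every rank except local rank 0):
#
#     for batch in A__(True, dataloader, desc='train'):
#         ...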
| 720 |
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Union[str, Any]:
A : Dict =tempfile.mkdtemp()
A : int =SamImageProcessor()
A : Union[str, Any] =SamProcessor(SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[int]:
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ).image_processor
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Any:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Optional[int]:
A : str =[np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
A : Optional[int] =[Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Tuple:
A : Optional[int] =SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A : str =self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 )
A : Union[str, Any] =SamProcessor.from_pretrained(self.tmpdirname , do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Optional[int]:
A : Optional[Any] =self.get_image_processor()
A : Optional[Any] =SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
A : Dict =self.prepare_image_inputs()
A : Optional[int] =image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='np' )
A : Optional[Any] =processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : int ) -> Any:
A : str =self.get_image_processor()
A : Union[str, Any] =SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
A : str =[torch.ones((1, 3, 5, 5) )]
A : Optional[Any] =[[17_64, 26_46]]
A : List[Any] =[[6_83, 10_24]]
A : Union[str, Any] =processor.post_process_masks(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
A : Any =processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , torch.tensor(SCREAMING_SNAKE_CASE__ ) , torch.tensor(SCREAMING_SNAKE_CASE__ ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
A : str =[np.ones((1, 3, 5, 5) )]
A : int =processor.post_process_masks(SCREAMING_SNAKE_CASE__ , np.array(SCREAMING_SNAKE_CASE__ ) , np.array(SCREAMING_SNAKE_CASE__ ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
A : Any =[[1, 0], [0, 1]]
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
A : Any =processor.post_process_masks(SCREAMING_SNAKE_CASE__ , np.array(SCREAMING_SNAKE_CASE__ ) , np.array(SCREAMING_SNAKE_CASE__ ) )
@require_vision
@require_tf
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( self : str ) -> str:
A : Tuple =tempfile.mkdtemp()
A : Union[str, Any] =SamImageProcessor()
A : Union[str, Any] =SamProcessor(SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : int , **SCREAMING_SNAKE_CASE__ : str ) -> Union[str, Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ).image_processor
def SCREAMING_SNAKE_CASE_ ( self : str ) -> List[str]:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Tuple:
A : Optional[Any] =[np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
A : Any =[Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> List[str]:
A : Optional[Any] =SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A : Optional[Any] =self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 )
A : Dict =SamProcessor.from_pretrained(self.tmpdirname , do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Any:
A : Any =self.get_image_processor()
A : Any =SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
A : int =self.prepare_image_inputs()
A : Tuple =image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='np' )
A : List[Any] =processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_tf
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Tuple:
A : int =self.get_image_processor()
A : Any =SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
A : Optional[int] =[tf.ones((1, 3, 5, 5) )]
A : Tuple =[[17_64, 26_46]]
A : Union[str, Any] =[[6_83, 10_24]]
A : int =processor.post_process_masks(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_tensors='tf' )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
A : List[Any] =processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ ) , tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ ) , return_tensors='tf' , )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
A : Any =[np.ones((1, 3, 5, 5) )]
A : Optional[Any] =processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , np.array(SCREAMING_SNAKE_CASE__ ) , np.array(SCREAMING_SNAKE_CASE__ ) , return_tensors='tf' )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
A : Any =[[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
A : List[str] =processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , np.array(SCREAMING_SNAKE_CASE__ ) , np.array(SCREAMING_SNAKE_CASE__ ) , return_tensors='tf' )
@require_vision
@require_torchvision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Union[str, Any]:
A : Optional[int] =tempfile.mkdtemp()
A : Union[str, Any] =SamImageProcessor()
A : Dict =SamProcessor(SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : int , **SCREAMING_SNAKE_CASE__ : List[str] ) -> Any:
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ).image_processor
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Any:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Tuple:
A : Any =[np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
A : Tuple =[Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> List[str]:
A : Optional[Any] =self.get_image_processor()
A : Dict =SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
A : Optional[int] =np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
A : Optional[int] =[tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ )]
A : Union[str, Any] =[torch.tensor(SCREAMING_SNAKE_CASE__ )]
A : int =[[17_64, 26_46]]
A : int =[[6_83, 10_24]]
A : Dict =processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_tensors='tf' )
A : Optional[Any] =processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Any:
A : Union[str, Any] =self.get_image_processor()
A : int =SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
A : int =self.prepare_image_inputs()
A : List[Any] =image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='pt' )['pixel_values'].numpy()
A : Tuple =processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )['pixel_values'].numpy()
A : Optional[int] =image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='tf' )['pixel_values'].numpy()
A : Dict =processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='tf' )['pixel_values'].numpy()
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
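# A minimal post-processing sketch mirroring the tests above (the sizes are the
# hypothetical values the tests use: low-res masks of shape (1, 3, 5, 5),
# original image 1764x2646, resized model input 683x1024):
#
#     processor = SamProcessor(image_processor=SamImageProcessor())
#     masks = processor.post_process_masks([torch.ones((1, 3, 5, 5))], [[1764, 2646]], [[683, 1024]])
#     assert masks[0].shape == (1, 3, 1764, 2646)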
| 661 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
lowercase : str = DiTPipeline
lowercase : Optional[int] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
lowercase : Any = PipelineTesterMixin.required_optional_params - {
"""latents""",
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
lowercase : Optional[int] = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
lowercase : Optional[int] = False
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Dict:
torch.manual_seed(0 )
A : Union[str, Any] =TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=snake_case_ , activation_fn='gelu-approximate' , num_embeds_ada_norm=10_00 , norm_type='ada_norm_zero' , norm_elementwise_affine=snake_case_ , )
A : int =AutoencoderKL()
A : List[str] =DDIMScheduler()
A : Dict ={"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
return components
def SCREAMING_SNAKE_CASE_ ( self : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : int=0 ) -> Any:
if str(snake_case_ ).startswith('mps' ):
A : Dict =torch.manual_seed(snake_case_ )
else:
A : List[Any] =torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
A : List[Any] ={
"class_labels": [1],
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Tuple:
A : Any ="cpu"
A : str =self.get_dummy_components()
A : Union[str, Any] =self.pipeline_class(**snake_case_ )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
A : Tuple =self.get_dummy_inputs(snake_case_ )
A : List[str] =pipe(**snake_case_ ).images
A : Optional[int] =image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
A : List[str] =np.array([0.2_9_4_6, 0.6_6_0_1, 0.4_3_2_9, 0.3_2_9_6, 0.4_1_4_4, 0.5_3_1_9, 0.7_2_7_3, 0.5_0_1_3, 0.4_4_5_7] )
A : Optional[Any] =np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(snake_case_ , 1e-3 )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Union[str, Any]:
self._test_inference_batch_single_identical(relax_max_difference=snake_case_ , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Union[str, Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self : int ) -> str:
A : List[str] =torch.manual_seed(0 )
A : Dict =DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
pipe.to('cuda' )
A : Union[str, Any] =["vase", "umbrella", "white shark", "white wolf"]
A : Optional[Any] =pipe.get_label_ids(snake_case_ )
A : Tuple =pipe(snake_case_ , generator=snake_case_ , num_inference_steps=40 , output_type='np' ).images
for word, image in zip(snake_case_ , snake_case_ ):
A : str =load_numpy(
f'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy' )
assert np.abs((expected_image - image).max() ) < 1e-2
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> str:
A : int =DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
A : Optional[Any] =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('cuda' )
A : str =["vase", "umbrella"]
A : Any =pipe.get_label_ids(snake_case_ )
A : List[str] =torch.manual_seed(0 )
A : Tuple =pipe(snake_case_ , generator=snake_case_ , num_inference_steps=25 , output_type='np' ).images
for word, image in zip(snake_case_ , snake_case_ ):
A : Any =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
f'/dit/{word}_512.npy' )
assert np.abs((expected_image - image).max() ) < 1e-1
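# A minimal class-conditional sampling sketch mirroring the slow test above
# ('facebook/DiT-XL-2-256' is the checkpoint the test loads):
#
#     pipe = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256').to('cuda')
#     class_ids = pipe.get_label_ids(['vase', 'umbrella'])
#     images = pipe(class_ids, generator=torch.manual_seed(0), num_inference_steps=40, output_type='np').images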
| 721 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
_lowercase : Optional[Any] =WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN'''])
def A__ ( lowercase: Optional[int] ) -> Optional[int]:
A : str =test_results.split(' ' )
A : List[str] =0
A : Tuple =0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
A : List[str] =expressions[-2] if '=' in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
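# Example (hedged): given a pytest summary such as
# "== 2 failed, 10 passed in 0:01:30 ==", the split yields failed=2, success=10
# and time_spent="0:01:30" (the trailing "==" token makes the parser take the
# second-to-last token as the time).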
def A__ ( lowercase: List[Any] ) -> str:
A : Union[str, Any] ={}
A : Optional[Any] =None
A : Union[str, Any] =False
for line in failures_short_lines.split('\n' ):
        if re.search(r'_ \[doctest\]', line ):
A : List[Any] =True
A : Any =line.split(' ' )[2]
elif in_error and not line.split(' ' )[0].isdigit():
A : Dict =line
A : List[str] =False
return failures
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
def __init__( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict ) -> List[str]:
A : Tuple =title
A : Dict =doc_test_results['time_spent'].split(',' )[0]
A : Union[str, Any] =doc_test_results['success']
A : Any =doc_test_results['failures']
A : Optional[Any] =self.n_success + self.n_failures
# Failures and success of the modeling tests
A : Union[str, Any] =doc_test_results
@property
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> str:
A : Any =[self._time_spent]
A : List[str] =0
for time in time_spent:
A : List[Any] =time.split(':' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(SCREAMING_SNAKE_CASE__ ) == 1:
A : List[str] =[0, 0, time_parts[0]]
A , A , A : Tuple =int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 36_00 + minutes * 60 + seconds
A , A , A : str =total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
return f'{int(SCREAMING_SNAKE_CASE__ )}h{int(SCREAMING_SNAKE_CASE__ )}m{int(SCREAMING_SNAKE_CASE__ )}s'
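    # e.g. a part like '1:30:45' contributes 5445 seconds, while a bare '.5'
    # (a run shorter than a minute) is padded to [0, 0, '.5'] and contributes
    # half a second.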
@property
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Dict:
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
f' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Dict:
A : Tuple =40
A : Optional[Any] ={k: v['failed'] for k, v in doc_test_results.items() if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}
A : Any =''
for category, failures in category_failures.items():
if len(SCREAMING_SNAKE_CASE__ ) == 0:
continue
if report != "":
report += "\n\n"
report += f'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(SCREAMING_SNAKE_CASE__ )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'The following examples had failures:\n\n\n{report}\n',
},
}
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> str:
A : Optional[int] =[self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(SCREAMING_SNAKE_CASE__ )
@staticmethod
def SCREAMING_SNAKE_CASE_ ( ) -> Optional[Any]:
A : Tuple =[
{
'type': 'section',
'text': {
'type': 'plain_text',
'text': 'There was an issue running the tests.',
},
'accessory': {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True},
'url': f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
]
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(SCREAMING_SNAKE_CASE__ )} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text='There was an issue running the tests.' , blocks=SCREAMING_SNAKE_CASE__ , )
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Optional[int]:
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(self.payload )} ) )
A : Any =f'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else 'All tests passed.'
A : Dict =client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , blocks=self.payload , text=SCREAMING_SNAKE_CASE__ , )
def SCREAMING_SNAKE_CASE_ ( self : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[int]:
A : List[str] =''
for key, value in failures.items():
A : Any =value[:2_00] + ' [Truncated]' if len(SCREAMING_SNAKE_CASE__ ) > 2_50 else value
failures_text += f'*{key}*\n_{value}_\n\n'
A : Union[str, Any] =job_name
A : Any ={'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}}
if job_link is not None:
A : int ={
'type': 'button',
'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True},
'url': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> List[Any]:
if self.thread_ts is None:
raise ValueError('Can only post reply if a post has been made.' )
A : Union[str, Any] =self.doc_test_results.pop('job_link' )
self.doc_test_results.pop('failures' )
self.doc_test_results.pop('success' )
self.doc_test_results.pop('time_spent' )
        A : Union[str, Any] =sorted(self.doc_test_results.items() , key=lambda t : t[0] )
for job, job_result in sorted_dict:
if len(job_result['failures'] ):
A : Any =f'*Num failures* :{len(job_result["failed"] )} \n'
A : List[Any] =job_result['failures']
A : Any =self.get_reply_blocks(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , text=SCREAMING_SNAKE_CASE__ )
print('Sending the following reply' )
print(json.dumps({'blocks': blocks} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text=f'Results for {job}' , blocks=SCREAMING_SNAKE_CASE__ , thread_ts=self.thread_ts['ts'] , )
time.sleep(1 )
def A__ ( ) -> Union[str, Any]:
A : Any =os.environ['GITHUB_RUN_ID']
A : List[Any] =F'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'
A : Union[str, Any] =requests.get(lowercase ).json()
A : List[Any] ={}
try:
jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
A : List[str] =math.ceil((result['total_count'] - 100) / 100 )
for i in range(lowercase ):
A : List[str] =requests.get(url + F'&page={i + 2}' ).json()
jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
return jobs
except Exception as e:
        print('Unknown error, could not fetch links.', e )
return {}
def A__ ( lowercase: str ) -> Optional[Any]:
A : Any ={}
if os.path.exists(lowercase ):
A : List[Any] =os.listdir(lowercase )
for file in files:
try:
with open(os.path.join(lowercase, lowercase ), encoding='utf-8' ) as f:
A : Optional[int] =f.read()
except UnicodeDecodeError as e:
raise ValueError(F'Could not open {os.path.join(lowercase, lowercase )}.' ) from e
return _artifact
def A__ ( ) -> int:
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : str ) -> List[str]:
A : Dict =name
A : Dict =[]
def __str__( self : Optional[Any] ) -> List[str]:
return self.name
def SCREAMING_SNAKE_CASE_ ( self : int , SCREAMING_SNAKE_CASE__ : str ) -> List[Any]:
self.paths.append({'name': self.name, 'path': path} )
A : Dict[str, Artifact] ={}
A : str =filter(os.path.isdir, os.listdir() )
for directory in directories:
A : Tuple =directory
if artifact_name not in _available_artifacts:
A : int =Artifact(lowercase )
_available_artifacts[artifact_name].add_path(lowercase )
return _available_artifacts
if __name__ == "__main__":
_lowercase : Optional[int] =get_job_links()
_lowercase : str =retrieve_available_artifacts()
_lowercase : List[Any] =collections.OrderedDict(
[
('''*.py''', '''API Examples'''),
('''*.md''', '''MD Examples'''),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
_lowercase : Optional[Any] ={
v: {
'''failed''': [],
'''failures''': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
_lowercase : List[Any] =github_actions_job_links.get('''run_doctests''')
_lowercase : int =available_artifacts['''doc_tests_gpu_test_reports'''].paths[0]
_lowercase : Dict =retrieve_artifact(artifact_path['''name'''])
if "stats" in artifact:
_lowercase , _lowercase , _lowercase : List[Any] =handle_test_results(artifact['''stats'''])
_lowercase : Any =failed
_lowercase : Union[str, Any] =success
_lowercase : str =time_spent[1:-1] + ''', '''
_lowercase : Any =extract_first_line_failure(artifact['''failures_short'''])
for line in artifact["summary_short"].split('''\n'''):
if re.search('''FAILED''', line):
_lowercase : Tuple =line.replace('''FAILED ''', '''''')
_lowercase : int =line.split()[0].replace('''\n''', '''''')
if "::" in line:
_lowercase , _lowercase : str =line.split('''::''')
else:
_lowercase , _lowercase : Union[str, Any] =line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
_lowercase : Any =docs[file_regex]
doc_test_results[category]["failed"].append(test)
_lowercase : Any =all_failures[test] if test in all_failures else '''N/A'''
_lowercase : Tuple =failure
break
_lowercase : Optional[int] =Message('''🤗 Results of the doc tests.''', doc_test_results)
message.post()
message.post_reply()
| 661 | 0 |
def A__ ( lowercase: int ) -> bool:
return str(lowercase ) == str(lowercase )[::-1]
def A__ ( lowercase: int ) -> int:
return int(lowercase ) + int(str(lowercase )[::-1] )
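# Project Euler 55: a Lychrel number is one that never forms a palindrome via
# repeated reverse-and-add. Below, a number under the limit counts as Lychrel
# when 50 iterations produce no palindrome (e.g. 47 -> 47 + 74 = 121 resolves
# in one step, while 196 is the classic unresolved candidate).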
def A__ ( lowercase: int = 10_000 ) -> int:
A : List[str] =[]
for num in range(1, lowercase ):
A : Any =0
A : Any =num
        while iterations < 50:
            A : List[str] =sum_reverse(candidate )
            iterations += 1
            if is_palindrome(candidate ):
                break
        else:
            lychrel_nums.append(num )
    return len(lychrel_nums )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 700 |
_lowercase : Dict ='''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 661 | 0 |
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
def __init__( self : Any , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int=13 , SCREAMING_SNAKE_CASE__ : str=7 , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : Dict=99 , SCREAMING_SNAKE_CASE__ : Tuple=32 , SCREAMING_SNAKE_CASE__ : Optional[Any]=5 , SCREAMING_SNAKE_CASE__ : Optional[Any]=4 , SCREAMING_SNAKE_CASE__ : int=37 , SCREAMING_SNAKE_CASE__ : str="gelu" , SCREAMING_SNAKE_CASE__ : int=0.1 , SCREAMING_SNAKE_CASE__ : Any=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1_28 , SCREAMING_SNAKE_CASE__ : Any=32 , SCREAMING_SNAKE_CASE__ : Optional[Any]=16 , SCREAMING_SNAKE_CASE__ : Dict=2 , SCREAMING_SNAKE_CASE__ : List[str]=0.0_2 , SCREAMING_SNAKE_CASE__ : Optional[int]=3 , SCREAMING_SNAKE_CASE__ : str=4 , SCREAMING_SNAKE_CASE__ : Dict=None , ) -> List[Any]:
A : Union[str, Any] =parent
A : List[Any] =batch_size
A : Tuple =seq_length
A : List[str] =is_training
A : Optional[Any] =use_input_mask
A : Dict =use_token_type_ids
A : Any =use_labels
A : Tuple =vocab_size
A : str =hidden_size
A : List[str] =num_hidden_layers
A : Union[str, Any] =num_attention_heads
A : int =intermediate_size
A : str =hidden_act
A : List[str] =hidden_dropout_prob
A : int =attention_probs_dropout_prob
A : str =max_position_embeddings
A : Any =type_vocab_size
A : str =type_sequence_label_size
A : List[str] =initializer_range
A : Dict =num_labels
A : Dict =num_choices
A : Dict =scope
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> int:
A : int =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A : Any =None
if self.use_input_mask:
A : Optional[int] =random_attention_mask([self.batch_size, self.seq_length] )
A : List[Any] =None
if self.use_token_type_ids:
A : str =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A : Optional[Any] =None
A : Union[str, Any] =None
A : Dict =None
if self.use_labels:
A : Union[str, Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
A : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A : Any =ids_tensor([self.batch_size] , self.num_choices )
A : str =self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> List[Any]:
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Union[str, Any]:
        A , A , A , A , A , A , A : Optional[Any] =self.prepare_config_and_inputs()
A : Optional[int] =True
A : Any =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
A : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def SCREAMING_SNAKE_CASE_ ( self : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> Optional[int]:
A : Dict =NezhaModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A : Any =model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase )
A : str =model(_lowerCamelCase , token_type_ids=_lowerCamelCase )
A : Tuple =model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , ) -> Optional[Any]:
A : int =True
A : str =NezhaModel(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A : Optional[Any] =model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , encoder_attention_mask=_lowerCamelCase , )
A : List[Any] =model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , )
A : Dict =model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] ) -> List[str]:
A : Tuple =NezhaForMaskedLM(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A : int =model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[int]:
A : Optional[int] =NezhaForNextSentencePrediction(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A : List[Any] =model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any ) -> List[str]:
A : Optional[int] =NezhaForPreTraining(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A : Optional[Any] =model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase , next_sentence_label=_lowerCamelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple ) -> Union[str, Any]:
A : List[str] =NezhaForQuestionAnswering(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A : Dict =model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , start_positions=_lowerCamelCase , end_positions=_lowerCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple ) -> int:
A : Union[str, Any] =self.num_labels
A : Dict =NezhaForSequenceClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A : int =model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any ) -> Optional[int]:
A : Tuple =self.num_labels
A : str =NezhaForTokenClassification(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A : Tuple =model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] ) -> Tuple:
A : List[str] =self.num_choices
A : Union[str, Any] =NezhaForMultipleChoice(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A : Optional[int] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A : Union[str, Any] =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A : Tuple =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A : Tuple =model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE_ ( self : int ) -> int:
A : List[str] =self.prepare_config_and_inputs()
        A , A , A , A , A , A , A : Any =config_and_inputs
A : int ={'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
lowercase : str = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
lowercase : List[str] = (
{
"feature-extraction": NezhaModel,
"fill-mask": NezhaForMaskedLM,
"question-answering": NezhaForQuestionAnswering,
"text-classification": NezhaForSequenceClassification,
"token-classification": NezhaForTokenClassification,
"zero-shot": NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase : List[Any] = True
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any]=False ) -> Optional[Any]:
A : Tuple =super()._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
if return_labels:
if model_class in get_values(_lowerCamelCase ):
A : Tuple =torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_lowerCamelCase )
A : List[Any] =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowerCamelCase )
return inputs_dict
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Any:
A : Any =NezhaModelTester(self )
A : int =ConfigTester(self , config_class=_lowerCamelCase , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Optional[Any]:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> int:
A : List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Optional[Any]:
A : Tuple =self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> List[str]:
# This regression test was failing with PyTorch < 1.3
        A , A , A , A , A , A , A , A , A : str =self.model_tester.prepare_config_and_inputs_for_decoder()
A : Dict =None
self.model_tester.create_and_check_model_as_decoder(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , )
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Any:
A : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> str:
A : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self : str ) -> str:
A : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*_lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Optional[int]:
A : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> int:
A : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Optional[Any]:
A : List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Tuple:
A : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowerCamelCase )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Dict:
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A : Optional[int] =NezhaModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
@slow
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Tuple:
A , A : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
A : str =True
A : Any =model_class(config=_lowerCamelCase )
A : Union[str, Any] =self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
A : int =torch.jit.trace(
_lowerCamelCase , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_lowerCamelCase , os.path.join(_lowerCamelCase , 'bert.pt' ) )
A : Tuple =torch.jit.load(os.path.join(_lowerCamelCase , 'bert.pt' ) , map_location=_lowerCamelCase )
loaded(inputs_dict['input_ids'].to(_lowerCamelCase ) , inputs_dict['attention_mask'].to(_lowerCamelCase ) )
@require_torch
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> List[Any]:
A : Dict =NezhaModel.from_pretrained('sijunhe/nezha-cn-base' )
A : Optional[Any] =torch.tensor([[0, 1, 2, 3, 4, 5]] )
A : List[str] =torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
A : List[str] =model(_lowerCamelCase , attention_mask=_lowerCamelCase )[0]
A : Tuple =torch.Size((1, 6, 7_68) )
self.assertEqual(output.shape , _lowerCamelCase )
A : Optional[int] =torch.tensor([[[0.0_6_8_5, 0.2_4_4_1, 0.1_1_0_2], [0.0_6_0_0, 0.1_9_0_6, 0.1_3_4_9], [0.0_2_2_1, 0.0_8_1_9, 0.0_5_8_6]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _lowerCamelCase , atol=1e-4 ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Dict:
A : Optional[Any] =NezhaForMaskedLM.from_pretrained('sijunhe/nezha-cn-base' )
A : Dict =torch.tensor([[0, 1, 2, 3, 4, 5]] )
A : Optional[int] =torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
A : Union[str, Any] =model(_lowerCamelCase , attention_mask=_lowerCamelCase )[0]
A : Optional[int] =torch.Size((1, 6, 2_11_28) )
self.assertEqual(output.shape , _lowerCamelCase )
A : int =torch.tensor(
[[-2.7_9_3_9, -1.7_9_0_2, -2.2_1_8_9], [-2.8_5_8_5, -1.8_9_0_8, -2.3_7_2_3], [-2.6_4_9_9, -1.7_7_5_0, -2.2_5_5_8]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _lowerCamelCase , atol=1e-4 ) )
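# A minimal inference sketch mirroring the integration tests above
# ('sijunhe/nezha-cn-base' is the checkpoint the tests load):
#
#     model = NezhaModel.from_pretrained('sijunhe/nezha-cn-base')
#     input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
#     with torch.no_grad():
#         hidden = model(input_ids, attention_mask=torch.ones_like(input_ids))[0]  # (1, 6, 768)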
| 701 |
from typing import List
from .keymap import KEYMAP, get_character
def A__ ( lowercase: str ) -> List[str]:
def decorator(lowercase: int ):
A : Tuple =getattr(lowercase, 'handle_key', [] )
handle += [key]
setattr(lowercase, 'handle_key', lowercase )
return func
return decorator
def A__ ( *lowercase: List[str] ) -> Dict:
def decorator(lowercase: Union[str, Any] ):
A : Optional[int] =getattr(lowercase, 'handle_key', [] )
handle += keys
setattr(lowercase, 'handle_key', lowercase )
return func
return decorator
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
def __new__( cls : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> Any:
A : Dict =super().__new__(cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if not hasattr(SCREAMING_SNAKE_CASE__ , 'key_handler' ):
setattr(SCREAMING_SNAKE_CASE__ , 'key_handler' , {} )
setattr(SCREAMING_SNAKE_CASE__ , 'handle_input' , KeyHandler.handle_input )
for value in attrs.values():
A : Optional[Any] =getattr(SCREAMING_SNAKE_CASE__ , 'handle_key' , [] )
for key in handled_keys:
A : str =value
return new_cls
@staticmethod
def SCREAMING_SNAKE_CASE_ ( cls : str ) -> Any:
A : str =get_character()
if char != KEYMAP["undefined"]:
A : List[str] =ord(SCREAMING_SNAKE_CASE__ )
A : List[str] =cls.key_handler.get(SCREAMING_SNAKE_CASE__ )
if handler:
A : List[str] =char
return handler(cls )
else:
return None
def A__ ( cls: Optional[int] ) -> str:
return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy() )
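# A hedged usage sketch (the decorators and the class wrapper above are all
# obfuscated to `A__` in this file; `mark` and `register` are hypothetical
# stand-ins for the single-key decorator and the wrapper directly above):
#
#     @register
#     class BulletMenu:
#         @mark("up")
#         def go_up(cls):
#             ...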
| 661 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
_lowercase : Any =None
_lowercase : int =logging.get_logger(__name__)
_lowercase : Dict ={'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
_lowercase : Union[str, Any] ={
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
},
'tokenizer_file': {
'google/bigbird-roberta-base': (
'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'
),
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'
),
},
}
_lowercase : Any ={
'google/bigbird-roberta-base': 4_0_9_6,
'google/bigbird-roberta-large': 4_0_9_6,
'google/bigbird-base-trivia-itc': 4_0_9_6,
}
_lowercase : Union[str, Any] ='▁'
class SCREAMING_SNAKE_CASE_ ( lowerCamelCase__ ):
'''simple docstring'''
lowercase : Tuple = VOCAB_FILES_NAMES
lowercase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
lowercase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[str] = BigBirdTokenizer
lowercase : Tuple = ["input_ids", "attention_mask"]
lowercase : str = []
    def __init__( self , vocab_file=None , tokenizer_file=None , unk_token="<unk>" , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , sep_token="[SEP]" , mask_token="[MASK]" , cls_token="[CLS]" , **kwargs , ):
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_b + sep
    def get_special_tokens_mask( self , token_ids_a , token_ids_b = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_b is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.' )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
        if token_ids_b is None:
            return [1] + ([0] * len(token_ids_a )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1] + ([0] * len(token_ids_b )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
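# Usage sketch (illustrative only; upstream ships the class above as
# `BigBirdTokenizerFast`, and the pretrained assets are assumed available):
#
#     tokenizer = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
#     ids = tokenizer("Hello world").input_ids
#     # Single sequences come out as [CLS] ... [SEP]; pairs as [CLS] A [SEP] B [SEP],
#     # matching build_inputs_with_special_tokens above.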
import math


def prime_sieve(n: int) -> list:
    """Sieve of Eratosthenes; returns the primes below ``n``."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
def solution(limit: int = 999_966_663_333) -> int:
    """Sum of the semidivisible numbers not exceeding ``limit`` (Project Euler 234)."""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum
if __name__ == "__main__":
print(solution())
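# Quick sanity check for the sieve (exact values):
#
#     >>> prime_sieve(30)
#     [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]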
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> List[Any]:
        model = AutoModelForSeqaSeqLM.from_pretrained('google/mt5-small' , return_dict=True ).to(torch_device )
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small' )
        input_ids = tokenizer('Hello there' , return_tensors='pt' ).input_ids
        labels = tokenizer('Hi I am' , return_tensors='pt' ).input_ids
        loss = model(input_ids.to(torch_device ) , labels=labels.to(torch_device ) ).loss
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
import heapq


def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """Greedy APX algorithm for minimum vertex cover: repeatedly pick the vertex of highest degree."""
    queue: list[list] = []
    # For each node and its adjacency list, push the node together with its rank onto the queue.
    # heapq implements a min priority queue, so -1 * len(value) turns it into a max priority queue.
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # While the queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank):
    while queue and queue[0][0] != 0:
        # Extract the vertex with max rank from the queue and add it to chosen_vertices.
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)
        # Remove all arcs adjacent to argmax.
        for elem in queue:
            # If the vertex has no adjacent nodes left, skip it.
            if elem[0] == 0:
                continue
            # If argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank.
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # Re-order the queue.
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase : int =logging.get_logger(__name__)
_lowercase : Optional[Any] ='''▁'''
_lowercase : Tuple ={'''vocab_file''': '''prophetnet.tokenizer'''}
_lowercase : Optional[Any] ={
'''vocab_file''': {
'''microsoft/xprophetnet-large-wiki100-cased''': (
'''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer'''
),
}
}
_lowercase : Optional[Any] ={
'''microsoft/xprophetnet-large-wiki100-cased''': {'''do_lower_case''': False},
}
_lowercase : List[str] ={
'''microsoft/xprophetnet-large-wiki100-cased''': 5_1_2,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into an ordered token -> index dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, 'r', encoding='utf-8' ) as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens ):
        token = token.rstrip('\n' )
        vocab[token] = index
    return vocab
class SCREAMING_SNAKE_CASE_ ( snake_case__ ):
'''simple docstring'''
lowercase : int = VOCAB_FILES_NAMES
lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : Any = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , bos_token="[SEP]" , eos_token="[SEP]" , sep_token="[SEP]" , unk_token="[UNK]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , sp_model_kwargs = None , **kwargs , ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , unk_token=unk_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                'You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'
                ' pip install sentencepiece' )
            raise
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
        for i in range(10 ):
            tok = f'[unused{i}]'
            self.fairseq_tokens_to_ids[tok] = 5 + i
        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k )
    def __getstate__( self ) -> Any:
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state
    def __setstate__( self , d ) -> None:
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                'You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'
                ' pip install sentencepiece' )
            raise
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def get_special_tokens_mask( self , token_ids_a , token_ids_b = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True )
        if token_ids_b is None:
            return ([0] * len(token_ids_a )) + [1]
        return ([0] * len(token_ids_a )) + [1] + ([0] * len(token_ids_b )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_b is None:
            return len(token_ids_a + sep ) * [0]
        return len(token_ids_a + sep + sep + token_ids_b + sep ) * [0]
@property
    def vocab_size( self ) -> int:
        return len(self.sp_model ) + self.fairseq_offset
    def get_vocab( self ) -> dict:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ) -> int:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token( self , index ) -> str:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self , tokens ) -> str:
        out_string = "".join(tokens ).replace(SPIECE_UNDERLINE , ' ' ).strip()
        return out_string
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , 'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b = None ) -> List[int]:
        if token_ids_b is None:
            return token_ids_a + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_a + sep + token_ids_b + sep
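# The fairseq <-> SentencePiece id bookkeeping above, summarised from the
# comments in __init__ (illustrative numbers only):
#
#     # 5 named specials ([PAD]..[MASK]) plus 10 "[unused*]" slots occupy ids 0-14,
#     # while spm reserves ids 0-2 for its own <unk>/<s>/</s>, so the first real
#     # piece sits at spm id 3 but embedding id 15 -> fairseq_offset = 15 - 3 = 12.
#     # _convert_token_to_id therefore returns spm_id + 12, falling back to the
#     # [UNK] id whenever the SentencePiece model answers 0.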
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
_lowercase : List[Any] =logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
    def __init__( self , feature_size : int , sampling_rate : int , padding_value : float , **kwargs ):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.padding_side = kwargs.pop('padding_side' , 'right' )
        # `return_attention_mask` is assumed to default to True, as in
        # `transformers.SequenceFeatureExtractor`.
        self.return_attention_mask = kwargs.pop('return_attention_mask' , True )
        super().__init__(**kwargs )
def SCREAMING_SNAKE_CASE_ ( self : str , SCREAMING_SNAKE_CASE__ : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , SCREAMING_SNAKE_CASE__ : Union[bool, str, PaddingStrategy] = True , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[str, TensorType]] = None , ) -> BatchFeature:
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(SCREAMING_SNAKE_CASE__ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
A : Tuple ={
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
'You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'
f' to this method that includes {self.model_input_names[0]}, but you provided'
f' {list(processed_features.keys() )}' )
A : Dict =processed_features[self.model_input_names[0]]
A : int =(
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(SCREAMING_SNAKE_CASE__ ) == 0:
if return_attention_mask:
A : List[Any] =[]
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
A : List[str] =required_input[0]
if isinstance(SCREAMING_SNAKE_CASE__ , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
A : Any =0
while len(required_input[index] ) == 0:
index += 1
if index < len(SCREAMING_SNAKE_CASE__ ):
A : Dict =required_input[index][0]
if return_tensors is None:
if is_tf_tensor(SCREAMING_SNAKE_CASE__ ):
A : List[Any] ='tf'
elif is_torch_tensor(SCREAMING_SNAKE_CASE__ ):
A : Optional[int] ='pt'
elif isinstance(SCREAMING_SNAKE_CASE__ , (int, float, list, tuple, np.ndarray) ):
A : Union[str, Any] ='np'
else:
raise ValueError(
f'type of {first_element} unknown: {type(SCREAMING_SNAKE_CASE__ )}. '
'Should be one of a python, numpy, pytorch or tensorflow object.' )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
A : int =to_numpy(SCREAMING_SNAKE_CASE__ )
else:
A : List[Any] =[to_numpy(SCREAMING_SNAKE_CASE__ ) for v in value]
# Convert padding_strategy in PaddingStrategy
A : List[Any] =self._get_padding_strategies(padding=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ )
A : Optional[int] =processed_features[self.model_input_names[0]]
A : List[str] =len(SCREAMING_SNAKE_CASE__ )
if not all(len(SCREAMING_SNAKE_CASE__ ) == batch_size for v in processed_features.values() ):
raise ValueError('Some items in the output dictionary have a different batch size than others.' )
A : Tuple =[]
for i in range(SCREAMING_SNAKE_CASE__ ):
A : int ={k: v[i] for k, v in processed_features.items()}
# truncation
A : List[Any] =self._truncate(
SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , pad_to_multiple_of=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , )
truncated_inputs.append(SCREAMING_SNAKE_CASE__ )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
A : Any =max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
A : Optional[Any] =PaddingStrategy.MAX_LENGTH
A : List[Any] ={}
for i in range(SCREAMING_SNAKE_CASE__ ):
# padding
A : Optional[Any] =self._pad(
truncated_inputs[i] , max_length=SCREAMING_SNAKE_CASE__ , padding_strategy=SCREAMING_SNAKE_CASE__ , pad_to_multiple_of=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , )
for key, value in outputs.items():
if key not in batch_outputs:
A : Dict =[]
if value.dtype is np.dtype(np.floataa ):
A : Tuple =value.astype(np.floataa )
batch_outputs[key].append(SCREAMING_SNAKE_CASE__ )
return BatchFeature(SCREAMING_SNAKE_CASE__ , tensor_type=SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : Union[Dict[str, np.ndarray], BatchFeature] , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , ) -> dict:
A : Optional[int] =processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
A : List[str] =len(SCREAMING_SNAKE_CASE__ )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
A : Tuple =((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
A : int =padding_strategy != PaddingStrategy.DO_NOT_PAD and len(SCREAMING_SNAKE_CASE__ ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
A : str =np.ones(len(SCREAMING_SNAKE_CASE__ ) , dtype=np.intaa )
if needs_to_be_padded:
A : Union[str, Any] =max_length - len(SCREAMING_SNAKE_CASE__ )
if self.padding_side == "right":
if return_attention_mask:
A : Dict =np.pad(
processed_features['attention_mask'] , (0, difference) )
A : str =((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
A : Tuple =np.pad(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'constant' , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
A : List[Any] =np.pad(
processed_features['attention_mask'] , (difference, 0) )
A : Union[str, Any] =((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
A : Tuple =np.pad(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'constant' , constant_values=self.padding_value )
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return processed_features
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : Union[Dict[str, np.ndarray], BatchFeature] , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , ) -> Optional[Any]:
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError('When setting ``truncation=True``, make sure that ``max_length`` is defined.' )
A : Tuple =processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
A : Any =((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
A : List[str] =len(SCREAMING_SNAKE_CASE__ ) > max_length
if needs_to_be_truncated:
A : Union[str, Any] =processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
A : Dict =processed_features['attention_mask'][:max_length]
return processed_features
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any=False , SCREAMING_SNAKE_CASE__ : Dict=None ) -> Union[str, Any]:
# Get padding strategy
if padding is not False:
if padding is True:
A : List[Any] =PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
A : Tuple =PaddingStrategy(SCREAMING_SNAKE_CASE__ )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
A : Optional[int] =padding
else:
A : List[str] =PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f'When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined' )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
'Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'
' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.' )
return padding_strategy
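# A minimal padding sketch (illustrative only; upstream exposes the long batch
# method above as `pad`, and `model_input_names` is set explicitly here because
# its default value is an assumption):
#
#     import numpy as np
#
#     class ToyFeatureExtractor(SCREAMING_SNAKE_CASE_):
#         model_input_names = ["input_values"]
#
#     extractor = ToyFeatureExtractor(feature_size=1, sampling_rate=16_000, padding_value=0.0)
#     batch = BatchFeature({"input_values": [np.ones(3), np.ones(5)]})
#     padded = extractor.pad(batch, padding="longest", return_tensors="np")
#     # padded["input_values"].shape == (2, 5); the short array is right-padded with 0.0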
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase ):
'''simple docstring'''
@slow
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Optional[Any]:
A : List[Any] =EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny' , 'prajjwal1/bert-tiny' )
A : List[str] =BertTokenizer.from_pretrained('bert-base-uncased' )
A : Union[str, Any] =bertabert.config.encoder.vocab_size
A : str =tokenizer.sep_token_id
A : str =tokenizer.cls_token_id
A : Dict =1_28
A : str =datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='train[:1%]' )
A : Union[str, Any] =datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='validation[:1%]' )
A : List[str] =train_dataset.select(range(32 ) )
A : List[str] =val_dataset.select(range(16 ) )
A : Dict =4
def _map_to_encoder_decoder_inputs(SCREAMING_SNAKE_CASE__ : Optional[int] ):
# Tokenizer will automatically set [BOS] <text> [EOS]
A : Union[str, Any] =tokenizer(batch['article'] , padding='max_length' , truncation=A_ , max_length=5_12 )
A : List[str] =tokenizer(batch['highlights'] , padding='max_length' , truncation=A_ , max_length=1_28 )
A : int =inputs.input_ids
A : Any =inputs.attention_mask
A : str =outputs.input_ids
A : Any =outputs.input_ids.copy()
A : Any =[
[-1_00 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels']
]
A : Any =outputs.attention_mask
assert all(len(A_ ) == 5_12 for x in inputs.input_ids )
assert all(len(A_ ) == 1_28 for x in outputs.input_ids )
return batch
def _compute_metrics(SCREAMING_SNAKE_CASE__ : str ):
A : List[Any] =pred.label_ids
A : Tuple =pred.predictions
# all unnecessary tokens are removed
A : int =tokenizer.batch_decode(A_ , skip_special_tokens=A_ )
A : str =tokenizer.batch_decode(A_ , skip_special_tokens=A_ )
A : Optional[Any] =sum([int(pred_str[i] == label_str[i] ) for i in range(len(A_ ) )] ) / len(A_ )
return {"accuracy": accuracy}
# map train dataset
A : Optional[int] =train_dataset.map(
_map_to_encoder_decoder_inputs , batched=A_ , batch_size=A_ , remove_columns=['article', 'highlights'] , )
train_dataset.set_format(
type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
# same for validation dataset
A : Dict =val_dataset.map(
_map_to_encoder_decoder_inputs , batched=A_ , batch_size=A_ , remove_columns=['article', 'highlights'] , )
val_dataset.set_format(
type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
A : int =self.get_auto_remove_tmp_dir()
A : Any =SeqaSeqTrainingArguments(
output_dir=A_ , per_device_train_batch_size=A_ , per_device_eval_batch_size=A_ , predict_with_generate=A_ , evaluation_strategy='steps' , do_train=A_ , do_eval=A_ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
A : Union[str, Any] =SeqaSeqTrainer(
model=A_ , args=A_ , compute_metrics=_compute_metrics , train_dataset=A_ , eval_dataset=A_ , tokenizer=A_ , )
# start training
trainer.train()
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
_lowercase : Optional[int] =logging.get_logger(__name__)
_lowercase : List[str] ={
'''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'''
),
'''microsoft/deberta-v2-xxlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
lowercase : int = "deberta-v2"
    def __init__( self , vocab_size=12_81_00 , hidden_size=15_36 , num_hidden_layers=24 , num_attention_heads=24 , intermediate_size=61_44 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=0 , initializer_range=0.0_2 , layer_norm_eps=1e-7 , relative_attention=False , max_relative_positions=-1 , pad_token_id=0 , position_biased_input=True , pos_att_type=None , pooler_dropout=0 , pooler_hidden_act="gelu" , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input
        # Backwards compatibility
        if type(pos_att_type ) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split('|' )]
        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.pooler_hidden_size = kwargs.get('pooler_hidden_size' , hidden_size )
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)] )
        else:
            return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)] )
    @property
    def default_onnx_opset( self ) -> int:
        return 12
    def generate_dummy_inputs( self , preprocessor : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , batch_size : int = -1 , seq_length : int = -1 , num_choices : int = -1 , is_pair : bool = False , framework : Optional["TensorType"] = None , num_channels : int = 3 , image_width : int = 40 , image_height : int = 40 , tokenizer : "PreTrainedTokenizerBase" = None , ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor , framework=framework )
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
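# Usage sketch (illustrative; upstream exports the two classes above as
# `DebertaV2Config` and `DebertaV2OnnxConfig`):
#
#     config = DebertaV2Config(hidden_size=7_68, num_hidden_layers=12, type_vocab_size=0)
#     # With type_vocab_size == 0 the ONNX side drops `token_type_ids`:
#     # `inputs` then maps only input_ids / attention_mask to dynamic axes,
#     # and generate_dummy_inputs deletes any token_type_ids it inherited.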
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE_ ( metaclass=__lowercase ):
'''simple docstring'''
lowercase : str = ["onnx"]
def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> List[Any]:
requires_backends(self , ['onnx'] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[str] , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : int ) -> List[str]:
requires_backends(cls , ['onnx'] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> Dict:
requires_backends(cls , ['onnx'] )
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
lowercase : Tuple = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]
@register_to_config
def __init__( self : Any , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : int = 5_02_57 , SCREAMING_SNAKE_CASE__ : int = 10_24 , SCREAMING_SNAKE_CASE__ : int = 7_68 , SCREAMING_SNAKE_CASE__ : int = 12 , SCREAMING_SNAKE_CASE__ : int = 12 , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : str = "gelu_new" , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 1e-5 , SCREAMING_SNAKE_CASE__ : float = 0.0_2 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , ) -> List[str]:
super().__init__()
A : str =prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f'`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and'
f' `n_embd`: {n_embd} are not equal.' )
A : List[Any] =prefix_inner_dim
A : Dict =prefix_hidden_dim
A : List[str] =(
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
A : Optional[int] =(
nn.Linear(self.prefix_hidden_dim , SCREAMING_SNAKE_CASE__ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
A : Dict =GPTaConfig(
vocab_size=SCREAMING_SNAKE_CASE__ , n_positions=SCREAMING_SNAKE_CASE__ , n_embd=SCREAMING_SNAKE_CASE__ , n_layer=SCREAMING_SNAKE_CASE__ , n_head=SCREAMING_SNAKE_CASE__ , n_inner=SCREAMING_SNAKE_CASE__ , activation_function=SCREAMING_SNAKE_CASE__ , resid_pdrop=SCREAMING_SNAKE_CASE__ , embd_pdrop=SCREAMING_SNAKE_CASE__ , attn_pdrop=SCREAMING_SNAKE_CASE__ , layer_norm_epsilon=SCREAMING_SNAKE_CASE__ , initializer_range=SCREAMING_SNAKE_CASE__ , scale_attn_weights=SCREAMING_SNAKE_CASE__ , use_cache=SCREAMING_SNAKE_CASE__ , scale_attn_by_inverse_layer_idx=SCREAMING_SNAKE_CASE__ , reorder_and_upcast_attn=SCREAMING_SNAKE_CASE__ , )
A : Dict =GPTaLMHeadModel(SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE__ : torch.Tensor , SCREAMING_SNAKE_CASE__ : torch.Tensor , SCREAMING_SNAKE_CASE__ : Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE__ : Optional[torch.Tensor] = None , ) -> Optional[Any]:
A : str =self.transformer.transformer.wte(SCREAMING_SNAKE_CASE__ )
A : Any =self.encode_prefix(SCREAMING_SNAKE_CASE__ )
A : Optional[Any] =self.decode_prefix(SCREAMING_SNAKE_CASE__ )
A : Optional[int] =torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
A : int =self.get_dummy_token(input_ids.shape[0] , input_ids.device )
A : Optional[int] =torch.cat((dummy_token, input_ids) , dim=1 )
A : Dict =self.transformer(inputs_embeds=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def SCREAMING_SNAKE_CASE_ ( self : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : torch.device ) -> torch.Tensor:
return torch.zeros(SCREAMING_SNAKE_CASE__ , self.prefix_length , dtype=torch.intaa , device=SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[str]:
return self.encode_prefix(SCREAMING_SNAKE_CASE__ )
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any ) -> Dict:
A : Dict =torch.split(SCREAMING_SNAKE_CASE__ , 1 , dim=0 )
A : int =[]
A : Optional[int] =[]
for feature in features:
A : int =self.decode_prefix(feature.to(SCREAMING_SNAKE_CASE__ ) ) # back to the clip feature
# Only support beam search for now
A , A : Dict =self.generate_beam(
input_embeds=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
A : str =torch.stack(SCREAMING_SNAKE_CASE__ )
A : int =torch.stack(SCREAMING_SNAKE_CASE__ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : int = 5 , SCREAMING_SNAKE_CASE__ : int = 67 , SCREAMING_SNAKE_CASE__ : float = 1.0 , SCREAMING_SNAKE_CASE__ : Optional[int] = None , ) -> Dict:
A : Dict =eos_token_id
A : str =None
A : List[Any] =None
A : List[Any] =torch.ones(SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ , dtype=torch.int )
A : str =torch.zeros(SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ , dtype=torch.bool )
if input_embeds is not None:
A : Any =input_embeds
else:
A : List[Any] =self.transformer.transformer.wte(SCREAMING_SNAKE_CASE__ )
for i in range(SCREAMING_SNAKE_CASE__ ):
A : Any =self.transformer(inputs_embeds=SCREAMING_SNAKE_CASE__ )
A : str =outputs.logits
A : Union[str, Any] =logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
A : List[str] =logits.softmax(-1 ).log()
if scores is None:
A , A : Any =logits.topk(SCREAMING_SNAKE_CASE__ , -1 )
A : Any =generated.expand(SCREAMING_SNAKE_CASE__ , *generated.shape[1:] )
A , A : Tuple =next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
A : Union[str, Any] =next_tokens
else:
A : str =tokens.expand(SCREAMING_SNAKE_CASE__ , *tokens.shape[1:] )
A : Optional[int] =torch.cat((tokens, next_tokens) , dim=1 )
else:
A : Optional[Any] =-float(np.inf )
A : Tuple =0
A : Optional[Any] =scores[:, None] + logits
seq_lengths[~is_stopped] += 1
A : int =scores_sum / seq_lengths[:, None]
A , A : Optional[int] =scores_sum_average.view(-1 ).topk(SCREAMING_SNAKE_CASE__ , -1 )
A : Dict =next_tokens // scores_sum.shape[1]
A : Optional[Any] =seq_lengths[next_tokens_source]
A : Tuple =next_tokens % scores_sum.shape[1]
A : Optional[Any] =next_tokens.unsqueeze(1 )
A : Optional[Any] =tokens[next_tokens_source]
A : Any =torch.cat((tokens, next_tokens) , dim=1 )
A : List[str] =generated[next_tokens_source]
A : List[Any] =scores_sum_average * seq_lengths
A : Optional[Any] =is_stopped[next_tokens_source]
A : Optional[int] =self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
A : Any =torch.cat((generated, next_token_embed) , dim=1 )
A : Optional[int] =is_stopped + next_tokens.eq(SCREAMING_SNAKE_CASE__ ).squeeze()
if is_stopped.all():
break
A : Optional[Any] =scores / seq_lengths
A : str =scores.argsort(descending=SCREAMING_SNAKE_CASE__ )
# tokens tensors are already padded to max_seq_length
A : Optional[Any] =[tokens[i] for i in order]
A : Any =torch.stack(SCREAMING_SNAKE_CASE__ , dim=0 )
A : str =torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
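# Toy-scale smoke test for the decoder above (illustrative only; in diffusers
# the class is exported as `UniDiffuserTextDecoder`, name assumed here):
#
#     import torch
#
#     decoder = UniDiffuserTextDecoder(
#         prefix_length=4, prefix_inner_dim=32, prefix_hidden_dim=16,
#         vocab_size=100, n_positions=64, n_embd=32, n_layer=2, n_head=2,
#     )
#     input_ids = torch.randint(0, 100, (1, 8))
#     prefix = torch.randn(1, 4, 32)           # CLIP-style prefix features
#     out, hidden = decoder(input_ids, prefix)  # out.logits holds the LM logits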
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
_lowercase : List[str] =TypeVar('''T''')
class SCREAMING_SNAKE_CASE_ ( Generic[T] ):
'''simple docstring'''
    def __init__( self , directed : bool = True ) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed
    def add_edge( self , source_vertex : T , destination_vertex : T ) -> GraphAdjacencyList[T]:
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex].append(source_vertex )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex )
                self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex; also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
return self
def __repr__( self : List[str] ) -> str:
return pformat(self.adj_list )
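# Example usage of the adjacency-list graph above (the class is annotated as
# `GraphAdjacencyList`; substitute the actual class name when running):
#
#     graph = GraphAdjacencyList[int](directed=False)
#     graph.add_edge(0, 1).add_edge(1, 2)
#     print(graph)  # {0: [1], 1: [0, 2], 2: [1]}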
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowercase : Optional[int] =get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
lowercase : List[str] = XLMRobertaTokenizer
lowercase : Dict = XLMRobertaTokenizerFast
lowercase : str = True
lowercase : Tuple = True
def SCREAMING_SNAKE_CASE_ ( self : int ) -> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
A : List[str] =XLMRobertaTokenizer(SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : int ) -> List[Any]:
A : List[str] ='<pad>'
A : int =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Any:
A : List[str] =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , 10_02 )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Dict:
self.assertEqual(self.get_tokenizer().vocab_size , 10_02 )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> str:
A : Union[str, Any] =XLMRobertaTokenizer(SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ )
A : str =tokenizer.tokenize('This is a test' )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
A : Any =tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
A : Tuple =tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
A : Union[str, Any] =tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Optional[int]:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
A : Any =(self.rust_tokenizer_class, 'hf-internal-testing/tiny-xlm-roberta', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
A : List[Any] =self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
A : Dict =self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
A : str =tempfile.mkdtemp()
A : Optional[int] =tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE__ )
A : Optional[Any] =tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
A : List[str] =tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Checks everything loads correctly in the same way
A : Tuple =tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : Dict =tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(SCREAMING_SNAKE_CASE__ )
# Save tokenizer rust, legacy_format=True
A : Optional[int] =tempfile.mkdtemp()
A : Optional[int] =tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE__ , legacy_format=SCREAMING_SNAKE_CASE__ )
A : List[Any] =tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE__ )
# Checks it save with the same files
self.assertSequenceEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Checks everything loads correctly in the same way
A : Tuple =tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : Tuple =tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
shutil.rmtree(SCREAMING_SNAKE_CASE__ )
# Save tokenizer rust, legacy_format=False
A : List[Any] =tempfile.mkdtemp()
A : Optional[int] =tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE__ , legacy_format=SCREAMING_SNAKE_CASE__ )
A : str =tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
A : List[Any] =tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : List[Any] =tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
shutil.rmtree(SCREAMING_SNAKE_CASE__ )
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Optional[int]:
return XLMRobertaTokenizer.from_pretrained('xlm-roberta-base' )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Any:
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(SCREAMING_SNAKE_CASE__ , f.name )
A : Optional[Any] =XLMRobertaTokenizer(f.name , keep_accents=SCREAMING_SNAKE_CASE__ )
A : int =pickle.dumps(SCREAMING_SNAKE_CASE__ )
pickle.loads(SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Union[str, Any]:
if not self.test_rust_tokenizer:
return
A : Union[str, Any] =self.get_tokenizer()
A : int =self.get_rust_tokenizer()
A : List[str] ='I was born in 92000, and this is falsé.'
A : Union[str, Any] =tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
A : Optional[int] =rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A : Any =tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
A : Tuple =rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A : Optional[Any] =self.get_rust_tokenizer()
A : int =tokenizer.encode(SCREAMING_SNAKE_CASE__ )
A : Dict =rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> List[str]:
A : Any ='Hello World!'
A : Optional[Any] =[0, 3_53_78, 66_61, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(SCREAMING_SNAKE_CASE__ , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE__ ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> str:
A : Any =(
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
A : int =[
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(SCREAMING_SNAKE_CASE__ , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE__ ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Any:
# fmt: off
A : List[Any] ={'input_ids': [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE__ , model_name='xlm-roberta-base' , revision='d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3' , )
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
_lowercase : Optional[int] =logging.get_logger(__name__)
_lowercase : List[Any] ={
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
_lowercase : int =[
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # Walk the dotted `key` down to the target sub-module / parameter.
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer, attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f' {value.shape} for {full_name}'
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        elif any(x in name for x in ['adaptor', 'w2v_encoder.proj.', 'w2v_proj_ln.'] ):
            load_adapter(name, value, adapter, unused_weights )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('.' )[-2]
                        mapped_key = mapped_key.replace('*', layer_index )
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type )
                    continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F'Unused weights: {unused_weights}' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.' )[-1]
    items = name.split('.' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F'{full_name} has size {value.shape}, but'
                F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F'{full_name} has size {value.shape}, but'
                F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'{full_name} has size {value.shape}, but'
                F' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F'{full_name} has size {value.shape}, but'
                F' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
    else:
        unused_weights.append(full_name )
def load_adapter(full_name, value, adapter, unused_weights):
    name = full_name.split('adaptor.' )[-1]
    items = name.split('.' )
    # the adapter layer index has to be an integer
    if items[1].isdigit():
        layer_id = int(items[1] )
    else:
        layer_id = None
    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), F'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'
                adapter.proj_layer_norm.bias.data = value
                logger.info(F'Adapter proj layer norm bias was initialized from {full_name}.' )
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), F'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'
                adapter.proj_layer_norm.weight.data = value
                logger.info(F'Adapter proj layer norm weight was initialized from {full_name}.' )
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), F'{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'
                adapter.proj.bias.data = value
                logger.info(F'Adapter proj layer bias was initialized from {full_name}.' )
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), F'{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'
                adapter.proj.weight.data = value
                logger.info(F'Adapter proj layer weight was initialized from {full_name}.' )
    elif isinstance(layer_id, int ):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), F'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(F'Adapter layer {layer_id} bias was initialized from {full_name}.' )
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), F'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(F'Adapter layer {layer_id} weight was initialized from {full_name}.' )
    else:
        unused_weights.append(full_name )
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
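# A small sanity sketch for make_linear_from_emb (illustrative only, not part of
# the original script): the declared Linear dimensions are irrelevant because the
# weight tensor is replaced wholesale with the embedding matrix, which is how an
# LM head gets tied to an embedding.
#
#     emb = nn.Embedding(10, 4)
#     lm_head = make_linear_from_emb(emb)
#     assert lm_head.weight.shape == (10, 4)  # shares values with emb.weight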
@torch.no_grad()
def convert_wav2vec2_checkpoint(checkpoint_path, pytorch_dump_folder_path, dict_path, config_yaml_path, encoder_config_path, decoder_config_path, add_adapter, adapter_kernel_size, adapter_stride, decoder_start_token_id, encoder_output_dim, ):
    # load configs
    encoder_config = Wav2Vec2Config.from_pretrained(
        encoder_config_path, add_adapter=add_adapter, adapter_stride=adapter_stride, adapter_kernel_size=adapter_kernel_size, use_auth_token=True, output_hidden_size=encoder_output_dim, )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path )
    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={
            'config_yaml': config_yaml_path,
            'data': '/'.join(dict_path.split('/' )[:-1] ),
            'w2v_path': checkpoint_path,
            'load_pretrained_decoder_from': None,
        }, )
    model = model[0].eval()
    # load feature extractor
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True )
    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config )
    recursively_load_weights_wav2vec2(model.encoder, hf_encoder )
    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config )
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False )
    logger.warning(F'The following keys are missing when loading the decoder weights: {missing_keys}' )
    logger.warning(F'The following keys are unexpected when loading the decoder weights: {unexpected_keys}' )
    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder )
    hf_wav2vec.config.tie_word_embeddings = False
    tokenizer = MBart50Tokenizer(dict_path )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    config = hf_wav2vec.config.to_dict()
    config['pad_token_id'] = tokenizer.pad_token_id
    config['bos_token_id'] = tokenizer.bos_token_id
    config['eos_token_id'] = tokenizer.eos_token_id
    config['tokenizer_class'] = """mbart50"""
    config['feature_extractor_type'] = """wav2vec2"""
    config['decoder_start_token_id'] = tokenizer.eos_token_id
    config['forced_bos_token_id'] = 250_004
    config['forced_eos_token_id'] = tokenizer.eos_token_id
    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config )
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_yaml_path''', default=None, type=str, help='''Path to yaml file of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-xls-r-1b''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/mbart-large-50-one-to-many-mmt''',
type=str,
help='''Path to hf decoder checkpoint config''',
)
parser.add_argument('''--add_adapter''', default=True, type=bool, help='''whethere to add model adapter layers''')
parser.add_argument('''--adapter_stride''', default=2, type=int, help='''stride of adapter layers''')
parser.add_argument('''--adapter_kernel_size''', default=3, type=int, help='''kernel size of adapter layers''')
parser.add_argument('''--encoder_output_dim''', default=1_0_2_4, type=int, help='''encoder output dim''')
parser.add_argument('''--start_token_id''', default=2_5_0_0_0_4, type=int, help='''`decoder_start_token_id` of model config''')
args = parser.parse_args()
convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
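# Example invocation of the conversion script above (illustrative; the script
# filename and all paths are placeholders, not part of the original module):
#
#   python convert_mbart_wav2vec2_checkpoint.py \
#       --checkpoint_path ./checkpoint_best.pt \
#       --pytorch_dump_folder_path ./wav2vec2-mbart50 \
#       --dict_path ./dict.mbart50.txt \
#       --config_yaml_path ./config.yaml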
| 708 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/config.json''',
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class SCREAMING_SNAKE_CASE_ ( PretrainedConfig ):
    '''simple docstring'''
    model_type = "xglm"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "num_layers",
    }
    def __init__( self , vocab_size=25_60_08 , max_position_embeddings=20_48 , d_model=10_24 , ffn_dim=40_96 , num_layers=24 , attention_heads=16 , activation_function="gelu" , dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , layerdrop=0.0 , init_std=0.0_2 , scale_embedding=True , use_cache=True , decoder_start_token_id=2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
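# A minimal sketch of how the attribute_map above behaves (illustrative, not
# part of the original module): reading `hidden_size` transparently resolves
# to `d_model`, so code written against the common config attribute names
# keeps working with this config class.
#
#     config = SCREAMING_SNAKE_CASE_(d_model=64, num_layers=2, attention_heads=4)
#     assert config.hidden_size == 64
#     assert config.num_hidden_layers == 2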
| 661 | 0 |
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def main():
    g = Github(os.environ['GITHUB_TOKEN'] )
    repo = g.get_repo('huggingface/transformers' )
    open_issues = repo.get_issues(state='open' )
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='closed' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
| 709 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description ='''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'''
def get_user_input():
    compute_environment = _ask_options(
        'In which compute environment are you running?', ['This machine', 'AWS (Amazon SageMaker)'], _convert_compute_environment, )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser('config', description=description )
    else:
        parser = argparse.ArgumentParser('Accelerate config command', description=description )
    parser.add_argument(
        '--config_file', default=None, help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ), )
    if subparsers is not None:
        parser.set_defaults(func=config_command )
    return parser
def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir ):
            os.makedirs(cache_dir )
        config_file = default_yaml_config_file
    if config_file.endswith('.json' ):
        config.to_json_file(config_file )
    else:
        config.to_yaml_file(config_file )
    print(F'accelerate configuration saved at {config_file}' )
def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args )
if __name__ == "__main__":
main()
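# The same flow is exposed through the installed CLI entry point (illustrative
# invocation; the path is a placeholder):
#
#   accelerate config --config_file ~/.cache/huggingface/accelerate/default_config.yaml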
| 661 | 0 |
import operator as op
def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y )  # noqa: E731 integer division operation
    opr = {
        '^': op.pow,
        '*': op.mul,
        '/': div,
        '+': op.add,
        '-': op.sub,
    }  # operators & their respective operation
    # print table header
    print('Symbol'.center(8 ), 'Action'.center(12 ), 'Stack', sep=' | ' )
    print('-' * (30 + len(post_fix )) )
    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x )  # append x to stack
            # output in tabular format
            print(x.rjust(8 ), ('push(' + x + ')').ljust(12 ), ','.join(stack ), sep=' | ' )
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print(''.rjust(8 ), ('pop(' + b + ')').ljust(12 ), ','.join(stack ), sep=' | ' )
            a = stack.pop()  # pop stack
            # output in tabular format
            print(''.rjust(8 ), ('pop(' + a + ')').ljust(12 ), ','.join(stack ), sep=' | ' )
            stack.append(
                str(opr[x](int(a ), int(b ) ) ) )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8 ), ('push(' + a + x + b + ')').ljust(12 ), ','.join(stack ), sep=' | ', )
    return int(stack[0] )
if __name__ == "__main__":
    Postfix =input('''\n\nEnter a Postfix Equation (space separated) = ''').split(''' ''')
print('''\n\tResult = ''', solve(Postfix))
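# A quick sanity check of the evaluator above (illustrative, not part of the
# original script): "5 6 9 * +" evaluates right-operand-first as 5 + (6 * 9).
#
#     assert solve("5 6 9 * +".split(" ")) == 59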
| 710 |
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS ='''src/transformers'''
# Matches is_xxx_available()
_re_backend = re.compile(R'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(R'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(R'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
_re_try = re.compile(R'''^\s*try:''')
# Catches a line with else:
_re_else = re.compile(R'''^\s*else:''')
def find_backend(line):
    if _re_test_backend.search(line ) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line )]
    backends.sort()
    return "_and_".join(backends )
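# For example (illustrative, derivable from the regexes above): a guard line such
# as "    if not is_torch_available():" yields "torch", while a line mentioning
# two checks, "if not is_torch_available() and not is_tf_available():", yields
# "tf_and_torch" after sorting.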
def parse_init(init_file):
    with open(init_file, 'r', encoding='utf-8', newline='\n' ) as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines ) and not lines[line_index].startswith('_import_structure = {' ):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines ):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line ):
            content = _re_one_line_import_struct.search(line ).groups()[0]
            imports = re.findall(R'\[([^\]]+)\]', content )
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(', ' )] )
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line )
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(obj ) > 0]
            objects.extend(imports )
        elif line.startswith(' ' * 8 + '"' ):
            objects.append(line[9:-3] )
        line_index += 1
    import_dict_objects = {'none': objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith('if TYPE_CHECKING' ):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line ) is not None:
                    objects.append(_re_import_struct_add_one.search(line ).groups()[0] )
                elif _re_import_struct_add_many.search(line ) is not None:
                    imports = _re_import_struct_add_many.search(line ).groups()[0].split(', ' )
                    imports = [obj[1:-1] for obj in imports if len(obj ) > 0]
                    objects.extend(imports )
                elif _re_between_brackets.search(line ) is not None:
                    imports = _re_between_brackets.search(line ).groups()[0].split(', ' )
                    imports = [obj[1:-1] for obj in imports if len(obj ) > 0]
                    objects.extend(imports )
                elif _re_quote_object.search(line ) is not None:
                    objects.append(_re_quote_object.search(line ).groups()[0] )
                elif line.startswith(' ' * 8 + '"' ):
                    objects.append(line[9:-3] )
                elif line.startswith(' ' * 12 + '"' ):
                    objects.append(line[13:-3] )
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines )
        and find_backend(lines[line_index] ) is None
        and not lines[line_index].startswith('else' )
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line )
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(', ' ) )
        elif line.startswith(' ' * 8 ):
            objects.append(line[8:-2] )
        line_index += 1
    type_hint_objects = {'none': objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines ):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(', ' ) )
                elif line.startswith(' ' * 12 ):
                    objects.append(line[12:-2] )
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq ).items() if v > 1]
    if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key] )
        if duplicate_imports:
            errors.append(F'Duplicate _import_structure definitions for: {duplicate_imports}' )
        duplicate_type_hints = find_duplicates(type_hint_objects[key] )
        if duplicate_type_hints:
            errors.append(F'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}' )
        if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
            name = 'base imports' if key == 'none' else F'{key} backend'
            errors.append(F'Differences for {name}:' )
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(F'  {a} in TYPE_HINT but not in _import_structure.' )
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(F'  {a} in _import_structure but not in TYPE_HINT.' )
    return errors
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
        if "__init__.py" in files:
            fname = os.path.join(root, '__init__.py' )
            objects = parse_init(fname )
            if objects is not None:
                errors = analyze_results(*objects )
                if len(errors ) > 0:
                    errors[0] = F'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'
                    failures.append('\n'.join(errors ) )
    if len(failures ) > 0:
        raise ValueError('\n\n'.join(failures ) )
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS ):
        for folder in directories:
            # Ignore private modules
            if folder.startswith('_' ):
                directories.remove(folder )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path ) / folder).glob('*.py' ) ) ) == 0:
                continue
            short_path = str((Path(path ) / folder).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace(os.path.sep, '.' )
            submodules.append(submodule )
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path ) / fname).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace('.py', '' ).replace(os.path.sep, '.' )
            if len(submodule.split('.' ) ) == 1:
                submodules.append(submodule )
    return submodules
IGNORE_SUBMODULES = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        'transformers', os.path.join(PATH_TO_TRANSFORMERS, '__init__.py' ), submodule_search_locations=[PATH_TO_TRANSFORMERS], )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered ) > 0:
        list_of_modules = '\n'.join(F'- {module}' for module in module_not_registered )
        raise ValueError(
            'The following submodules are not properly registered in the main init of Transformers:\n'
            F'{list_of_modules}\n'
            'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
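# Both checks are typically run from the repository root (illustrative
# invocation): python utils/check_inits.py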
| 661 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 711 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
_lowercase : Any =logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE_ ( TrainingArguments ):
    '''simple docstring'''
    label_smoothing : Optional[float] = field(
        default=0.0 , metadata={"help": "The label smoothing epsilon to apply (if not zero)."} )
    sortish_sampler : bool = field(default=False , metadata={"help": "Whether to SortishSamler or not."} )
    predict_with_generate : bool = field(
        default=False , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
    adafactor : bool = field(default=False , metadata={"help": "whether to use adafactor"} )
    encoder_layerdrop : Optional[float] = field(
        default=None , metadata={"help": "Encoder layer dropout probability. Goes into model.config."} )
    decoder_layerdrop : Optional[float] = field(
        default=None , metadata={"help": "Decoder layer dropout probability. Goes into model.config."} )
    dropout : Optional[float] = field(default=None , metadata={"help": "Dropout probability. Goes into model.config."} )
    attention_dropout : Optional[float] = field(
        default=None , metadata={"help": "Attention dropout probability. Goes into model.config."} )
    lr_scheduler : Optional[str] = field(
        default="linear" , metadata={"help": f'Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'} , )
| 661 | 0 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class SCREAMING_SNAKE_CASE_ ( ProcessorMixin ):
    '''simple docstring'''
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")
    def __init__( self , image_processor , tokenizer ):
        super().__init__(image_processor , tokenizer )
    def __call__( self , images , text : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , add_special_tokens : bool = True , padding : Union[bool, str, PaddingStrategy] = False , truncation : Union[bool, str, TruncationStrategy] = None , max_length : Optional[int] = None , stride : int = 0 , pad_to_multiple_of : Optional[int] = None , return_token_type_ids : Optional[bool] = None , return_attention_mask : Optional[bool] = None , return_overflowing_tokens : bool = False , return_special_tokens_mask : bool = False , return_offsets_mapping : bool = False , return_length : bool = False , verbose : bool = True , return_tensors : Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images , return_tensors=return_tensors , do_normalize=True , do_center_crop=True , **kwargs )
        encoding.update(encoding_image_processor )
        return encoding
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
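# Illustrative usage of a processor like the one above (hypothetical snippet,
# not part of the original module); "BridgeTower/bridgetower-base" is assumed
# to be a checkpoint that ships both an image processor and a Roberta tokenizer:
#
#     from transformers import BridgeTowerProcessor
#     processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#     batch = processor(images=image, text="a photo of a cat", return_tensors="pt")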
| 712 |
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary:
    '''simple docstring'''
    def __init__( self , *, # begin keyword-only arguments
        bos="<s>" , pad="<pad>" , eos="</s>" , unk="<unk>" , extra_special_symbols=None , ):
        self.bos_word , self.unk_word , self.pad_word , self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos )
        self.pad_index = self.add_symbol(pad )
        self.eos_index = self.add_symbol(eos )
        self.unk_index = self.add_symbol(unk )
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s )
        self.nspecial = len(self.symbols )
    def __eq__( self , other ):
        return self.indices == other.indices
    def __getitem__( self , idx ):
        if idx < len(self.symbols ):
            return self.symbols[idx]
        return self.unk_word
    def __len__( self ):
        return len(self.symbols )
    def __contains__( self , sym ):
        return sym in self.indices
    @classmethod
    def load( cls , f ):
        """Loads the dictionary from a text file."""
        d = cls()
        d.add_from_file(f )
        return d
    def add_symbol( self , word , n=1 , overwrite=False ):
        """Adds a word to the dictionary."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols )
            self.indices[word] = idx
            self.symbols.append(word )
            self.count.append(n )
            return idx
    def _load_meta( self , lines ):
        return 0
    def add_from_file( self , f ):
        """Loads a pre-existing dictionary from a text file."""
        if isinstance(f , str ):
            try:
                with open(f , 'r' , encoding='utf-8' ) as fd:
                    self.add_from_file(fd )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(f ) )
            return
        lines = f.readlines()
        indices_start_line = self._load_meta(lines )
        for line in lines[indices_start_line:]:
            try:
                line , field = line.rstrip().rsplit(' ' , 1 )
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line , field = line.rsplit(' ' , 1 )
                else:
                    overwrite = False
                count = int(field )
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        'Duplicate word found when loading Dictionary: \'{}\'. '
                        'Duplicate words can overwrite earlier ones by adding the '
                        '#fairseq:overwrite flag at the end of the corresponding row '
                        'in the dictionary file. If using the Camembert model, please '
                        'download an updated copy of the model file.'.format(word ) )
                self.add_symbol(word , n=count , overwrite=overwrite )
            except ValueError:
                raise ValueError('Incorrect dictionary format, expected \'<token> <cnt> [flags]\'' )
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    da = dict((re.sub(r'@@$', '', k ), v) if k.endswith('@@' ) else (re.sub(r'$', '</w>', k ), v) for k, v in d.items() )
    keep_keys = '<s> <pad> </s> <unk>'.split()
    # restore the special tokens
    for k in keep_keys:
        del da[F'{k}</w>']
        da[k] = d[k]  # restore
    return da
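# A tiny sanity check for rewrite_dict_keys (illustrative, not part of the
# original script); all four special tokens must be present in the input:
#
#     rewrite_dict_keys({"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le@@": 5, "er": 7})
#     # => {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le": 5, "er</w>": 7}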
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path ):
        raise ValueError(F'path {biogpt_checkpoint_path} does not exist!' )
    os.makedirs(pytorch_dump_folder_path, exist_ok=True )
    print(F'Writing results to {pytorch_dump_folder_path}' )
    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, 'checkpoint.pt' )
    if not os.path.isfile(checkpoint_file ):
        raise ValueError(F'path to the file {checkpoint_file} does not exist!' )
    chkpt = torch.load(checkpoint_file, map_location='cpu' )
    args = chkpt['cfg']['model']
    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, 'dict.txt' )
    if not os.path.isfile(dict_file ):
        raise ValueError(F'path to the file {dict_file} does not exist!' )
    src_dict = Dictionary.load(dict_file )
    src_vocab = rewrite_dict_keys(src_dict.indices )
    src_vocab_size = len(src_vocab )
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES['vocab_file'] )
    print(F'Generating {src_vocab_file} of {src_vocab_size} records' )
    with open(src_vocab_file, 'w', encoding='utf-8' ) as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent ) )
    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, 'bpecodes' )
    if not os.path.isfile(bpecodes_file ):
        raise ValueError(F'path to the file {bpecodes_file} does not exist!' )
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES['merges_file'] )
    shutil.copyfile(bpecodes_file, merges_file )
    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, 'config.json' )
    model_conf = {
        'activation_dropout': args['activation_dropout'],
        'architectures': ['BioGptForCausalLM'],
        'attention_probs_dropout_prob': args['attention_dropout'],
        'bos_token_id': 0,
        'eos_token_id': 2,
        'hidden_act': args['activation_fn'],
        'hidden_dropout_prob': args['dropout'],
        'hidden_size': args['decoder_embed_dim'],
        'initializer_range': 0.02,
        'intermediate_size': args['decoder_ffn_embed_dim'],
        'layer_norm_eps': 1e-1_2,
        'layerdrop': args['decoder_layerdrop'],
        'max_position_embeddings': args['max_target_positions'],
        'model_type': 'biogpt',
        'num_attention_heads': args['decoder_attention_heads'],
        'num_hidden_layers': args['decoder_layers'],
        'pad_token_id': 1,
        'scale_embedding': not args['no_scale_embedding'],
        'tie_word_embeddings': args['share_decoder_input_output_embed'],
        'vocab_size': src_vocab_size,
    }
    # good hparam defaults to start with
    print(F'Generating {biogpt_model_config_file}' )
    with open(biogpt_model_config_file, 'w', encoding='utf-8' ) as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent ) )
    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE )
    tokenizer_conf = {
        'bos_token': '<s>',
        'eos_token': '</s>',
        'model_max_length': 1_024,
        'pad_token': '<pad>',
        'special_tokens_map_file': None,
        'tokenizer_class': 'BioGptTokenizer',
        'unk_token': '<unk>',
    }
    print(F'Generating {biogpt_tokenizer_config_file}' )
    with open(biogpt_tokenizer_config_file, 'w', encoding='utf-8' ) as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent ) )
    # model
    model_state_dict = chkpt['model']
    # remove unneeded keys
    ignore_keys = [
        'decoder.version',
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None )
    layer_names = list(model_state_dict.keys() )
    for layer_name in layer_names:
        if layer_name.endswith('output_projection.weight' ):
            model_state_dict['output_projection.weight'] = model_state_dict.pop(layer_name )
        else:
            model_state_dict['biogpt.' + layer_name] = model_state_dict.pop(layer_name )
    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path )
    model_new = BioGptForCausalLM(config )
    # check that it loads ok
    model_new.load_state_dict(model_state_dict )
    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME )
    print(F'Generating {pytorch_weights_dump_path}' )
    torch.save(model_state_dict, pytorch_weights_dump_path )
    print('Conversion is done!' )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--biogpt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
args = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 661 | 0 |
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class SCREAMING_SNAKE_CASE_ ( PipelineTool ):
    '''simple docstring'''
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan
    inputs = ["text"]
    outputs = ["audio"]
    def setup( self ):
        if self.post_processor is None:
            self.post_processor = '''microsoft/speecht5_hifigan'''
        super().setup()
    def encode( self , text , speaker_embeddings=None ):
        inputs = self.pre_processor(text=text , return_tensors='pt' , truncation=True )
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError('Datasets needs to be installed if not passing speaker embeddings.' )
            embeddings_dataset = load_dataset('Matthijs/cmu-arctic-xvectors' , split='validation' )
            speaker_embeddings = torch.tensor(embeddings_dataset[73_05]['xvector'] ).unsqueeze(0 )
        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
    def forward( self , inputs ):
        with torch.no_grad():
            return self.model.generate_speech(**inputs )
    def decode( self , outputs ):
        with torch.no_grad():
            return self.post_processor(outputs ).cpu().detach()
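# Illustrative usage of the tool above (hypothetical snippet, not part of the
# original module): checkpoints are downloaded lazily on first call.
#
#     tool = SCREAMING_SNAKE_CASE_()
#     waveform = tool("Hello world")  # a 1-D waveform tensor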
| 713 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
USE_XLA = False
USE_AMP = False
def train_command_factory(args: Namespace):
    return TrainCommand(args )
class TrainCommand(BaseTransformersCLICommand):
'''simple docstring'''
    @staticmethod
    def register_subcommand( parser: ArgumentParser ):
        train_parser = parser.add_parser('train' , help='CLI tool to train a model on a task.' )
        train_parser.add_argument(
            '--train_data' , type=str , required=True , help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' , )
        train_parser.add_argument(
            '--column_label' , type=int , default=0 , help='Column of the dataset csv file with example labels.' )
        train_parser.add_argument(
            '--column_text' , type=int , default=1 , help='Column of the dataset csv file with example texts.' )
        train_parser.add_argument(
            '--column_id' , type=int , default=2 , help='Column of the dataset csv file with example ids.' )
        train_parser.add_argument(
            '--skip_first_row' , action='store_true' , help='Skip the first row of the csv file (headers).' )
        train_parser.add_argument('--validation_data' , type=str , default='' , help='path to validation dataset.' )
        train_parser.add_argument(
            '--validation_split' , type=float , default=0.1 , help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' , )
        train_parser.add_argument('--output' , type=str , default='./' , help='path to saved the trained model.' )
        train_parser.add_argument(
            '--task' , type=str , default='text_classification' , help='Task to train the model on.' )
        train_parser.add_argument(
            '--model' , type=str , default='bert-base-uncased' , help='Model\'s name or path to stored model.' )
        train_parser.add_argument('--train_batch_size' , type=int , default=32 , help='Batch size for training.' )
        train_parser.add_argument('--valid_batch_size' , type=int , default=64 , help='Batch size for validation.' )
        train_parser.add_argument('--learning_rate' , type=float , default=3e-5 , help='Learning rate.' )
        train_parser.add_argument('--adam_epsilon' , type=float , default=1e-08 , help='Epsilon for Adam optimizer.' )
        train_parser.set_defaults(func=train_command_factory )
    def __init__( self , args: Namespace ):
        self.logger = logging.get_logger('transformers-cli/training' )
        self.framework = 'tf' if is_tf_available() else 'torch'
        os.makedirs(args.output , exist_ok=True )
        self.output = args.output
        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id
        self.logger.info(f'Loading {args.task} pipeline for {args.model}' )
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model )
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(f'Loading dataset from {args.train_data}' )
        self.train_dataset = Processor.create_from_csv(
            args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f'Loading validation dataset from {args.validation_data}' )
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
    def run( self ):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()
    def run_torch( self ):
        raise NotImplementedError
    def run_tf( self ):
        self.pipeline.fit(
            self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
        # Save trained pipeline
        self.pipeline.save_pretrained(self.output )
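# Illustrative CLI invocation of the command registered above (arguments are
# placeholders, not part of the original module):
#
#   transformers-cli train --train_data ./train.csv --model bert-base-uncased --output ./trained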
| 661 | 0 |
def count_divisors(n):
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors
def solution():
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num ) > 500:
            break
    return t_num
if __name__ == "__main__":
print(solution())
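# Quick check of count_divisors (illustrative): 28 = 2^2 * 7 has
# (2 + 1) * (1 + 1) = 6 divisors (1, 2, 4, 7, 14, 28).
#
#     assert count_divisors(28) == 6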
| 714 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    '''simple docstring'''
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=4_00 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_rescale=True , rescale_factor=1 / 2_55 , do_pad=True , ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 13_33}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict( self ):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values( self , image_inputs , batched=False ):
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w )
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h )
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE_ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    '''simple docstring'''
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , 'image_mean' ) )
        self.assertTrue(hasattr(image_processing , 'image_std' ) )
        self.assertTrue(hasattr(image_processing , 'do_normalize' ) )
        self.assertTrue(hasattr(image_processing , 'do_resize' ) )
        self.assertTrue(hasattr(image_processing , 'size' ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 13_33} )
        self.assertEqual(image_processor.do_pad , True )
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=False )
        self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
        self.assertEqual(image_processor.do_pad , False )
    def test_batch_feature( self ):
        pass
    def test_call_pil( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_numpy( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_pytorch( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    @slow
    def test_call_pytorch_with_coco_detection_annotations( self ):
        # prepare image and target
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
            target = json.loads(f.read() )
        target = {'image_id': 3_97_69, 'annotations': target}
        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained('microsoft/conditional-detr-resnet-50' )
        encoding = image_processing(images=image , annotations=target , return_tensors='pt' )
        # verify pixel values
        expected_shape = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding['pixel_values'].shape , expected_shape )
        expected_slice = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , expected_slice , atol=1e-4 ) )
        # verify area
        expected_area = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , expected_area ) )
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4] )
        self.assertEqual(encoding['labels'][0]['boxes'].shape , expected_boxes_shape )
        expected_boxes_slice = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , expected_boxes_slice , atol=1e-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , expected_image_id ) )
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , expected_is_crowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , expected_class_labels ) )
        # verify orig_size
        expected_orig_size = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , expected_size ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Tuple:
# prepare image, target and masks_path
A : List[str] =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
A : Optional[int] =json.loads(f.read() )
A : int ={'file_name': '000000039769.png', 'image_id': 3_97_69, 'segments_info': target}
A : Optional[Any] =pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
A : List[Any] =ConditionalDetrImageProcessor(format='coco_panoptic' )
A : Union[str, Any] =image_processing(images=SCREAMING_SNAKE_CASE__ , annotations=SCREAMING_SNAKE_CASE__ , masks_path=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
# verify pixel values
A : Dict =torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , SCREAMING_SNAKE_CASE__ )
A : Dict =torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
# verify area
A : Optional[int] =torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , SCREAMING_SNAKE_CASE__ ) )
# verify boxes
A : List[Any] =torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , SCREAMING_SNAKE_CASE__ )
A : Any =torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , SCREAMING_SNAKE_CASE__ , atol=1e-3 ) )
# verify image_id
A : List[Any] =torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , SCREAMING_SNAKE_CASE__ ) )
# verify is_crowd
A : Any =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , SCREAMING_SNAKE_CASE__ ) )
# verify class_labels
A : str =torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , SCREAMING_SNAKE_CASE__ ) )
# verify masks
A : int =82_28_73
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , SCREAMING_SNAKE_CASE__ )
# verify orig_size
A : Any =torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , SCREAMING_SNAKE_CASE__ ) )
# verify size
A : str =torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , SCREAMING_SNAKE_CASE__ ) )
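# Summary of the two annotation schemas exercised above: detection targets
# are {"image_id", "annotations"} dicts built from COCO detection JSON,
# while panoptic targets are {"file_name", "image_id", "segments_info"}
# plus a masks_path pointing at the PNG segment maps; only the panoptic
# path adds a "masks" entry to the encoded labels.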
| 661 | 0 |
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
_lowercase : int ={
'''debug''': logging.DEBUG,
'''info''': logging.INFO,
'''warning''': logging.WARNING,
'''error''': logging.ERROR,
'''critical''': logging.CRITICAL,
}
_lowercase : Optional[Any] =logging.WARNING
def A__ ( ) -> str:
A : Any =os.getenv('DATASETS_VERBOSITY', UpperCAmelCase__ )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F'Unknown option DATASETS_VERBOSITY={env_level_str}, '
F'has to be one of: { ", ".join(log_levels.keys() ) }' )
return _default_log_level
def A__ ( ) -> str:
return __name__.split('.' )[0]
def A__ ( ) -> logging.Logger:
return logging.getLogger(_get_library_name() )
def A__ ( ) -> None:
# Apply our default configuration to the library root logger.
A : int =_get_library_root_logger()
library_root_logger.setLevel(_get_default_logging_level() )
def A__ ( ) -> None:
A : Dict =_get_library_root_logger()
library_root_logger.setLevel(logging.NOTSET )
def A__ ( lowercase: Optional[str] = None ) -> logging.Logger:
if name is None:
A : Dict =_get_library_name()
return logging.getLogger(UpperCAmelCase__ )
def A__ ( ) -> int:
return _get_library_root_logger().getEffectiveLevel()
def A__ ( lowercase: int ) -> None:
_get_library_root_logger().setLevel(UpperCAmelCase__ )
def A__ ( ) -> Dict:
return set_verbosity(UpperCAmelCase__ )
def A__ ( ) -> Tuple:
return set_verbosity(UpperCAmelCase__ )
def A__ ( ) -> Dict:
return set_verbosity(UpperCAmelCase__ )
def A__ ( ) -> Optional[int]:
return set_verbosity(UpperCAmelCase__ )
def A__ ( ) -> None:
A : str =False
def A__ ( ) -> None:
A : str =True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : int ) -> int: # pylint: disable=unused-argument
A : Union[str, Any] =args[0] if args else None
def __iter__( self : Tuple ) -> Optional[Any]:
return iter(self._iterator )
def __getattr__( self : str , SCREAMING_SNAKE_CASE__ : List[str] ) -> List[str]:
def empty_fn(*SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : str ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : Union[str, Any] ) -> Optional[Any]:
return self
def __exit__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict ) -> Tuple:
return
_lowercase : Union[str, Any] =True
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
def __call__( self : Any , *SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str=False , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Dict:
if _tqdm_active and not disable:
return tqdm_lib.tqdm(*__UpperCamelCase , **__UpperCamelCase )
else:
return EmptyTqdm(*__UpperCamelCase , **__UpperCamelCase )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> Union[str, Any]:
A : int =None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*__UpperCamelCase , **__UpperCamelCase )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Union[str, Any]:
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
_lowercase : int =_tqdm_cls()
def A__ ( ) -> bool:
global _tqdm_active
return bool(_tqdm_active )
def A__ ( ) -> Any:
global _tqdm_active
A : Optional[int] =True
def A__ ( ) -> Optional[Any]:
global _tqdm_active
A : Optional[int] =False
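# A minimal usage sketch for the verbosity helpers above (illustrative,
# guarded so it only runs when the module is executed directly). At bottom
# the pattern just adjusts the library root logger via the stdlib `logging`
# module, which the `A__` helpers above wrap:
if __name__ == "__main__":
    _demo_logger = logging.getLogger('datasets' )
    _demo_logger.setLevel(logging.INFO )  # comparable to set_verbosity(INFO)
    _demo_logger.info('visible at INFO verbosity' )
    _demo_logger.debug('suppressed unless verbosity is DEBUG' )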
| 715 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_lowercase : List[Any] =1_6
_lowercase : Union[str, Any] =3_2
def A__ ( lowercase: Accelerator, lowercase: int = 16, lowercase: str = "bert-base-cased" ) -> Optional[int]:
A : List[Any] =AutoTokenizer.from_pretrained(lowercase )
A : Any =load_dataset('glue', 'mrpc' )
    def tokenize_function(examples: Any ):
        # max_length=None => use the model max length (it's actually the default)
        outputs =tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None )
        return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
A : Any =datasets.map(
lowercase, batched=lowercase, remove_columns=['idx', 'sentence1', 'sentence2'], load_from_cache_file=lowercase )
    # We also rename the 'label' column to 'labels', which is the column name
    # the models in the transformers library expect for labels
A : Dict =tokenized_datasets.rename_column('label', 'labels' )
def collate_fn(lowercase: Optional[int] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowercase, padding='max_length', max_length=128, return_tensors='pt' )
return tokenizer.pad(lowercase, padding='longest', return_tensors='pt' )
# Instantiate dataloaders.
A : Union[str, Any] =DataLoader(
tokenized_datasets['train'], shuffle=lowercase, collate_fn=lowercase, batch_size=lowercase )
A : str =DataLoader(
tokenized_datasets['validation'], shuffle=lowercase, collate_fn=lowercase, batch_size=lowercase )
return train_dataloader, eval_dataloader
def A__ ( lowercase: Dict, lowercase: Optional[int], lowercase: Any, lowercase: str ) -> Tuple:
model.eval()
A : Tuple =0
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
A : Tuple =model(**lowercase )
A : Tuple =outputs.logits.argmax(dim=-1 )
        # It is slightly faster to call this once than multiple times
A , A : Union[str, Any] =accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(lowercase ) - 1:
A : List[Any] =predictions[: len(eval_dataloader.dataset ) - samples_seen]
A : Optional[int] =references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=lowercase, references=lowercase, )
    eval_metric =metric.compute()
return eval_metric["accuracy"]
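# Why the truncation above matters: distributed samplers pad the final
# batch with duplicated samples so every process receives a full batch;
# gathering therefore returns more predictions than the dataset has rows,
# and the last batch is trimmed to `len(eval_dataloader.dataset) -
# samples_seen` before being added to the metric.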
def A__ ( lowercase: Union[str, Any], lowercase: Dict ) -> List[str]:
# Initialize accelerator
A : Optional[int] =Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A : int =config['lr']
A : Optional[Any] =int(config['num_epochs'] )
A : Union[str, Any] =int(config['seed'] )
A : List[str] =int(config['batch_size'] )
A : Optional[Any] =args.model_name_or_path
set_seed(lowercase )
A , A : str =get_dataloaders(lowercase, lowercase, lowercase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A : List[str] =AutoModelForSequenceClassification.from_pretrained(lowercase, return_dict=lowercase )
# Instantiate optimizer
A : Any =(
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
A : List[str] =optimizer_cls(params=model.parameters(), lr=lowercase )
if accelerator.state.deepspeed_plugin is not None:
A : Optional[int] =accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
A : Dict =1
A : Union[str, Any] =(len(lowercase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
A : List[Any] =get_linear_schedule_with_warmup(
optimizer=lowercase, num_warmup_steps=0, num_training_steps=lowercase, )
else:
A : List[str] =DummyScheduler(lowercase, total_num_steps=lowercase, warmup_num_steps=0 )
# Prepare everything
    # There is no specific order to remember; we just need to unpack the objects in the same order we gave them to the
    # prepare method.
A , A , A , A , A : Optional[int] =accelerator.prepare(
lowercase, lowercase, lowercase, lowercase, lowercase )
# We need to keep track of how many total steps we have iterated over
A : Tuple =0
# We also need to keep track of the stating epoch so files are named properly
A : List[str] =0
A : Tuple =evaluate.load('glue', 'mrpc' )
A : Optional[int] =num_epochs
if args.partial_train_epoch is not None:
A : Dict =args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
A : List[Any] =args.resume_from_checkpoint.split('epoch_' )[1]
A : List[Any] =''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
A : Union[str, Any] =int(lowercase ) + 1
A : List[str] =evaluation_loop(lowercase, lowercase, lowercase, lowercase )
accelerator.print('resumed checkpoint performance:', lowercase )
accelerator.print('resumed checkpoint\'s scheduler\'s lr:', lr_scheduler.get_lr()[0] )
        accelerator.print('resumed optimizer\'s lr:', optimizer.param_groups[0]['lr'] )
with open(os.path.join(args.output_dir, F'state_{starting_epoch-1}.json' ), 'r' ) as f:
A : Union[str, Any] =json.load(lowercase )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
A : str ={}
for epoch in range(lowercase, lowercase ):
model.train()
for step, batch in enumerate(lowercase ):
A : Tuple =model(**lowercase )
A : List[Any] =outputs.loss
A : Any =loss / gradient_accumulation_steps
accelerator.backward(lowercase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
A : Union[str, Any] =F'epoch_{epoch}'
A : Optional[Any] =os.path.join(args.output_dir, lowercase )
accelerator.save_state(lowercase )
A : Optional[Any] =evaluation_loop(lowercase, lowercase, lowercase, lowercase )
A : Dict =accuracy
A : Optional[Any] =lr_scheduler.get_lr()[0]
A : Any =optimizer.param_groups[0]['lr']
A : str =epoch
A : Dict =overall_step
accelerator.print(F'epoch {epoch}:', lowercase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir, F'state_{epoch}.json' ), 'w' ) as f:
json.dump(lowercase, lowercase )
def A__ ( ) -> Optional[int]:
    A : Optional[int] =argparse.ArgumentParser(description='Simple example of a training script that saves checkpoints and can resume from them.' )
parser.add_argument(
'--model_name_or_path', type=lowercase, default='bert-base-cased', help='Path to pretrained model or model identifier from huggingface.co/models.', required=lowercase, )
parser.add_argument(
'--output_dir', type=lowercase, default='.', help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.', )
parser.add_argument(
'--resume_from_checkpoint', type=lowercase, default=lowercase, help='If the training should continue from a checkpoint folder.', )
parser.add_argument(
'--partial_train_epoch', type=lowercase, default=lowercase, help='If passed, the training will stop after this number of epochs.', )
parser.add_argument(
'--num_epochs', type=lowercase, default=2, help='Number of train epochs.', )
A : str =parser.parse_args()
A : Optional[int] ={'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(lowercase, lowercase )
if __name__ == "__main__":
main()
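# Usage note (hedged; the script name below is illustrative): accelerate
# based scripts like this one are normally started through the accelerate
# CLI rather than plain `python`, e.g.
#
#   accelerate launch this_script.py --model_name_or_path bert-base-cased \
#       --output_dir ./ckpts --num_epochs 2
#
# and resumed with `--resume_from_checkpoint ./ckpts/epoch_0`, matching
# the `epoch_{n}` folder names saved by the training loop above.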
| 661 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase : int =logging.get_logger(__name__)
_lowercase : Dict ={
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
lowercase : int = """roformer"""
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict=5_00_00 , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : int=7_68 , SCREAMING_SNAKE_CASE__ : Optional[Any]=12 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=12 , SCREAMING_SNAKE_CASE__ : Tuple=30_72 , SCREAMING_SNAKE_CASE__ : Any="gelu" , SCREAMING_SNAKE_CASE__ : List[Any]=0.1 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=15_36 , SCREAMING_SNAKE_CASE__ : Optional[Any]=2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.0_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1e-12 , SCREAMING_SNAKE_CASE__ : List[Any]=0 , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : Tuple=True , **SCREAMING_SNAKE_CASE__ : int , ) -> Tuple:
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
A : List[Any] =vocab_size
A : Any =hidden_size if embedding_size is None else embedding_size
A : Dict =hidden_size
A : List[Any] =num_hidden_layers
A : Tuple =num_attention_heads
A : Union[str, Any] =hidden_act
A : List[Any] =intermediate_size
A : Optional[Any] =hidden_dropout_prob
A : List[str] =attention_probs_dropout_prob
A : str =max_position_embeddings
A : Optional[int] =type_vocab_size
A : Optional[Any] =initializer_range
A : Tuple =layer_norm_eps
A : Optional[int] =rotary_value
A : int =use_cache
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
@property
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> List[str]:
if self.task == "multiple-choice":
A : Optional[int] ={0: 'batch', 1: 'choice', 2: 'sequence'}
else:
A : List[str] ={0: 'batch', 1: 'sequence'}
A : str ={0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
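# For context: the configuration class above corresponds to the RoFormer
# model family (model_type "roformer"), and instantiating it with no
# arguments yields the defaults visible in the signature (hedged sketch):
#
#   config = RoFormerConfig()  # vocab_size=50000, hidden_size=768, ...
#   assert config.num_hidden_layers == 12
#
# The second class is the ONNX export config: its dynamic-axes mapping
# marks the batch and sequence dimensions of input_ids, attention_mask and
# token_type_ids as variable-length.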
| 716 |
def A__ ( lowercase: int ) -> int:
    """
    Count the set bits (1s) of a non-negative integer using Brian
    Kernighan's trick: ``n & (n - 1)`` clears the lowest set bit.

    >>> A__(25)
    3
    >>> A__(36)
    2
    >>> A__(0)
    0
    """
    if not isinstance(lowercase, int ) or lowercase < 0:
        raise ValueError('Input must be a non-negative integer' )
    A : int =0
    while lowercase:
        # Each `n &= n - 1` clears the lowest set bit, so the loop runs once
        # per set bit instead of once per bit position (up to 32 times)
        lowercase &= lowercase - 1
        A += 1
    return A
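# Worked example of the Kernighan step for 25 (0b11001):
#   25 & 24 = 0b11001 & 0b11000 = 24  -> clears bit 0
#   24 & 23 = 0b11000 & 0b10111 = 16  -> clears bit 3
#   16 & 15 = 0b10000 & 0b01111 = 0   -> clears bit 4
# Three iterations, matching the three set bits of 25.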
if __name__ == "__main__":
import doctest
doctest.testmod()
| 661 | 0 |
def A__ ( lowercase: list ) -> list:
    """Return all permutations of the input sequence using Heap's algorithm."""
    if len(lowercase ) <= 1:
        return [tuple(lowercase )]
    res : list =[]

    def generate(n: int, arr: list ):
        c : list =[0] * n
        res.append(tuple(arr ) )
        i : int =0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    # Even index: swap the first and i-th elements
                    arr[0], arr[i] =arr[i], arr[0]
                else:
                    # Odd index: swap the c[i]-th and i-th elements
                    arr[c[i]], arr[i] =arr[i], arr[c[i]]
                res.append(tuple(arr ) )
                c[i] += 1
                i =0
            else:
                c[i] =0
                i += 1

    generate(len(lowercase ), lowercase )
    return res


if __name__ == "__main__":
    _lowercase : str =input('''Enter numbers separated by a comma:\n''').strip()
    _arr : list =[int(item) for item in _lowercase.split(''',''')]
    print(A__(_arr))
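# Sanity check (illustrative): A__([1, 2, 3]) returns all 3! = 6 orderings,
# each produced from the previous one by a single swap:
#   [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]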
| 717 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def A__ ( *lowercase: Tuple, lowercase: Optional[Union[Dict, Any]] = None, lowercase: Dict=True, lowercase: Any=2 ) -> List[Any]:
from .. import __version__
A : Optional[Any] =take_from
A : Union[str, Any] =()
if not isinstance(args[0], lowercase ):
A : List[str] =(args,)
for attribute, version_name, message in args:
if version.parse(version.parse(lowercase ).base_version ) >= version.parse(lowercase ):
raise ValueError(
F'The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''
F' version {__version__} is >= {version_name}' )
A : Tuple =None
if isinstance(lowercase, lowercase ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(lowercase ),)
A : Union[str, Any] =F'The `{attribute}` argument is deprecated and will be removed in version {version_name}.'
elif hasattr(lowercase, lowercase ):
values += (getattr(lowercase, lowercase ),)
A : Optional[Any] =F'The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'
elif deprecated_kwargs is None:
A : List[Any] =F'`{attribute}` is deprecated and will be removed in version {version_name}.'
if warning is not None:
A : List[Any] =warning + ' ' if standard_warn else ''
warnings.warn(warning + message, lowercase, stacklevel=lowercase )
if isinstance(lowercase, lowercase ) and len(lowercase ) > 0:
A : Any =inspect.getouterframes(inspect.currentframe() )[1]
A : int =call_frame.filename
A : int =call_frame.lineno
A : Optional[int] =call_frame.function
A , A : int =next(iter(deprecated_kwargs.items() ) )
raise TypeError(F'{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`' )
if len(lowercase ) == 0:
return
elif len(lowercase ) == 1:
return values[0]
return values
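# A hedged usage sketch for the helper above (named `deprecate` in the
# diffusers source; the call-site names here are illustrative):
#
#   def scale_image(image, **kwargs):
#       strength = deprecate(
#           'strength', '0.30.0', 'Use `scale_factor` instead.',
#           take_from=kwargs )
#
# A bare (attribute, version, message) triple is wrapped into a single
# deprecation tuple; the helper emits the warning, pops the deprecated
# kwarg from `take_from` when present, and returns the popped value (a
# tuple when several attributes are deprecated in one call).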
| 661 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowercase : int ={
"configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[int] =[
"FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
"FalconForCausalLM",
"FalconModel",
"FalconPreTrainedModel",
"FalconForSequenceClassification",
"FalconForTokenClassification",
"FalconForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
_lowercase : List[str] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
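# A stripped-down sketch of the lazy-import pattern used above. This is an
# illustration of the idea only, not the actual transformers `_LazyModule`
# implementation:
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    """Resolve attributes to submodule members on first access."""

    def __init__(self, name: str, import_structure: dict ):
        super().__init__(name )
        self._import_structure = import_structure

    def __getattr__(self, attr: str ):
        # Import the owning submodule lazily, then forward the lookup.
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f'{self.__name__}.{submodule}' )
                return getattr(module, attr )
        raise AttributeError(f'module {self.__name__} has no attribute {attr}' )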
| 718 |
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def A__ ( lowercase: int, lowercase: str ) -> Dict:
assert isinstance(lowercase, lowercase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory', [False, True] )
def A__ ( lowercase: Dict, lowercase: Tuple, lowercase: str ) -> str:
A : Any =tmp_path / 'cache'
A : Dict ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A : Dict =JsonDatasetReader(lowercase, cache_dir=lowercase, keep_in_memory=lowercase ).read()
_check_json_dataset(lowercase, lowercase )
@pytest.mark.parametrize(
'features', [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
], )
def A__ ( lowercase: Optional[int], lowercase: Any, lowercase: Union[str, Any] ) -> Tuple:
A : Tuple =tmp_path / 'cache'
A : Optional[Any] ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A : Optional[Any] =features.copy() if features else default_expected_features
A : Union[str, Any] =(
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
A : str =JsonDatasetReader(lowercase, features=lowercase, cache_dir=lowercase ).read()
_check_json_dataset(lowercase, lowercase )
@pytest.mark.parametrize(
'features', [
None,
{'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'},
], )
def A__ ( lowercase: Optional[int], lowercase: str, lowercase: Dict ) -> Optional[int]:
A : int =tmp_path / 'cache'
A : Tuple ={'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'}
A : int =features.copy() if features else default_expected_features
A : str =(
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
A : Optional[int] =JsonDatasetReader(lowercase, features=lowercase, cache_dir=lowercase ).read()
assert isinstance(lowercase, lowercase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def A__ ( lowercase: Optional[Any], lowercase: str ) -> Tuple:
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
A : str ={'col_2': 'int64', 'col_3': 'float64', 'col_1': 'string'}
A : Dict =features.copy()
A : List[str] =(
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
A : int =tmp_path / 'cache'
A : Optional[int] =JsonDatasetReader(lowercase, features=lowercase, cache_dir=lowercase ).read()
assert isinstance(lowercase, lowercase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('split', [None, NamedSplit('train' ), 'train', 'test'] )
def A__ ( lowercase: Union[str, Any], lowercase: Any, lowercase: str ) -> Optional[Any]:
A : Optional[int] =tmp_path / 'cache'
A : Optional[Any] ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A : str =JsonDatasetReader(lowercase, cache_dir=lowercase, split=lowercase ).read()
_check_json_dataset(lowercase, lowercase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type', [str, list] )
def A__ ( lowercase: Optional[Any], lowercase: int, lowercase: Union[str, Any] ) -> List[Any]:
if issubclass(lowercase, lowercase ):
A : int =jsonl_path
elif issubclass(lowercase, lowercase ):
A : Any =[jsonl_path]
A : Optional[Any] =tmp_path / 'cache'
A : Tuple ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A : List[str] =JsonDatasetReader(lowercase, cache_dir=lowercase ).read()
_check_json_dataset(lowercase, lowercase )
def A__ ( lowercase: List[str], lowercase: Tuple, lowercase: Optional[Any]=("train",) ) -> Tuple:
assert isinstance(lowercase, lowercase )
for split in splits:
A : List[str] =dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory', [False, True] )
def A__ ( lowercase: Tuple, lowercase: Optional[int], lowercase: Any ) -> str:
A : List[str] =tmp_path / 'cache'
A : Union[str, Any] ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A : str =JsonDatasetReader({'train': jsonl_path}, cache_dir=lowercase, keep_in_memory=lowercase ).read()
_check_json_datasetdict(lowercase, lowercase )
@pytest.mark.parametrize(
'features', [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
], )
def A__ ( lowercase: Optional[int], lowercase: Optional[int], lowercase: Optional[int] ) -> Tuple:
A : Any =tmp_path / 'cache'
A : List[str] ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A : str =features.copy() if features else default_expected_features
A : Dict =(
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
A : Optional[Any] =JsonDatasetReader({'train': jsonl_path}, features=lowercase, cache_dir=lowercase ).read()
_check_json_datasetdict(lowercase, lowercase )
@pytest.mark.parametrize('split', [None, NamedSplit('train' ), 'train', 'test'] )
def A__ ( lowercase: Any, lowercase: List[Any], lowercase: List[Any] ) -> Tuple:
if split:
A : Optional[int] ={split: jsonl_path}
else:
A : Dict ='train'
A : Optional[Any] ={'train': jsonl_path, 'test': jsonl_path}
A : Tuple =tmp_path / 'cache'
A : List[str] ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A : List[Any] =JsonDatasetReader(lowercase, cache_dir=lowercase ).read()
_check_json_datasetdict(lowercase, lowercase, splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def A__ ( lowercase: List[Any] ) -> Tuple:
return json.load(lowercase )
def A__ ( lowercase: List[Any] ) -> Tuple:
    return [json.loads(line ) for line in lowercase]
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Any:
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , lines=SCREAMING_SNAKE_CASE__ ).write()
buffer.seek(0 )
A : int =load_json_function(SCREAMING_SNAKE_CASE__ )
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert isinstance(exported_content[0] , SCREAMING_SNAKE_CASE__ )
assert len(SCREAMING_SNAKE_CASE__ ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any ) -> Optional[Any]:
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , lines=SCREAMING_SNAKE_CASE__ , orient=SCREAMING_SNAKE_CASE__ ).write()
buffer.seek(0 )
A : Any =load_json(SCREAMING_SNAKE_CASE__ )
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(SCREAMING_SNAKE_CASE__ , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(SCREAMING_SNAKE_CASE__ ) == 10
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[int]:
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , lines=SCREAMING_SNAKE_CASE__ , num_proc=2 ).write()
buffer.seek(0 )
A : int =load_json_function(SCREAMING_SNAKE_CASE__ )
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert isinstance(exported_content[0] , SCREAMING_SNAKE_CASE__ )
assert len(SCREAMING_SNAKE_CASE__ ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[Any]:
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , lines=SCREAMING_SNAKE_CASE__ , orient=SCREAMING_SNAKE_CASE__ , num_proc=2 ).write()
buffer.seek(0 )
A : List[Any] =load_json(SCREAMING_SNAKE_CASE__ )
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(SCREAMING_SNAKE_CASE__ , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(SCREAMING_SNAKE_CASE__ ) == 10
def SCREAMING_SNAKE_CASE_ ( self : str , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> List[Any]:
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , num_proc=0 )
@pytest.mark.parametrize('compression, extension' , [('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')] )
def SCREAMING_SNAKE_CASE_ ( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict ) -> str:
A : Union[str, Any] =tmp_path_factory.mktemp('data' ) / f'test.json.{extension}'
A : Union[str, Any] =str(shared_datadir / f'test_file.json.{extension}' )
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , compression=SCREAMING_SNAKE_CASE__ ).write()
with fsspec.open(SCREAMING_SNAKE_CASE__ , 'rb' , compression='infer' ) as f:
A : str =f.read()
with fsspec.open(SCREAMING_SNAKE_CASE__ , 'rb' , compression='infer' ) as f:
A : List[str] =f.read()
assert exported_content == original_content
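# A hedged round-trip sketch of the reader/writer pair exercised above
# (the path is illustrative; requires the `datasets` package):
#
#   ds = Dataset.from_dict({'col_1': ['a'], 'col_2': [1]} )
#   JsonDatasetWriter(ds, 'out.jsonl', lines=True ).write()
#   reloaded = JsonDatasetReader('out.jsonl' ).read()
#   assert reloaded.column_names == ['col_1', 'col_2']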
| 661 | 0 |
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Dict:
A : List[Any] =['''a''', '''b''', '''c''']
# Defaults to last layer if both are None
A : Optional[Any] =get_aligned_output_features_output_indices(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , ['c'] )
self.assertEqual(SCREAMING_SNAKE_CASE__ , [2] )
# Out indices set to match out features
A : Optional[Any] =get_aligned_output_features_output_indices(['a', 'c'] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , ['a', 'c'] )
self.assertEqual(SCREAMING_SNAKE_CASE__ , [0, 2] )
# Out features set to match out indices
A : int =get_aligned_output_features_output_indices(SCREAMING_SNAKE_CASE__ , [0, 2] , SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , ['a', 'c'] )
self.assertEqual(SCREAMING_SNAKE_CASE__ , [0, 2] )
# Out features selected from negative indices
A : str =get_aligned_output_features_output_indices(SCREAMING_SNAKE_CASE__ , [-3, -1] , SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , ['a', 'c'] )
self.assertEqual(SCREAMING_SNAKE_CASE__ , [-3, -1] )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> List[str]:
# Stage names must be set
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
verify_out_features_out_indices(['a', 'b'] , (0, 1) , SCREAMING_SNAKE_CASE__ )
# Out features must be a list
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
verify_out_features_out_indices(('a', 'b') , (0, 1) , ['a', 'b'] )
# Out features must be a subset of stage names
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
verify_out_features_out_indices(['a', 'b'] , (0, 1) , ['a'] )
# Out indices must be a list or tuple
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
verify_out_features_out_indices(SCREAMING_SNAKE_CASE__ , 0 , ['a', 'b'] )
# Out indices must be a subset of stage names
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
verify_out_features_out_indices(SCREAMING_SNAKE_CASE__ , (0, 1) , ['a'] )
# Out features and out indices must be the same length
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
verify_out_features_out_indices(['a', 'b'] , (0,) , ['a', 'b', 'c'] )
# Out features should match out indices
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
verify_out_features_out_indices(['a', 'b'] , (0, 2) , ['a', 'b', 'c'] )
# Out features and out indices should be in order
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
verify_out_features_out_indices(['b', 'a'] , (0, 1) , ['a', 'b'] )
# Check passes with valid inputs
verify_out_features_out_indices(['a', 'b', 'd'] , (0, 1, -1) , ['a', 'b', 'c', 'd'] )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Optional[int]:
A : Any =BackboneMixin()
A : List[str] =['''a''', '''b''', '''c''']
A : Tuple =['''a''', '''c''']
A : Optional[Any] =[0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ['a', 'c'] )
self.assertEqual(backbone.out_indices , [0, 2] )
# Check out features and indices are updated correctly
A : Tuple =['''a''', '''b''']
self.assertEqual(backbone.out_features , ['a', 'b'] )
self.assertEqual(backbone.out_indices , [0, 1] )
A : Any =[-3, -1]
self.assertEqual(backbone.out_features , ['a', 'c'] )
self.assertEqual(backbone.out_indices , [-3, -1] )
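# The contract checked above, in short: out_features and out_indices must
# select the same stages, in stage order, as subsets of stage_names; when
# only one is supplied the other is derived from it, and when both are None
# the last stage is used. Negative indices count from the end, so [-3, -1]
# over ["a", "b", "c"] selects ["a", "c"].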
| 719 |
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
lowercase : Optional[int] = DDIMPipeline
lowercase : int = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
lowercase : Optional[Any] = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"latents",
"callback",
"callback_steps",
}
lowercase : Optional[Any] = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
lowercase : Union[str, Any] = False
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Optional[int]:
torch.manual_seed(0 )
A : str =UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
A : Optional[int] =DDIMScheduler()
A : Optional[Any] ={'unet': unet, 'scheduler': scheduler}
return components
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any]=0 ) -> Any:
if str(SCREAMING_SNAKE_CASE__ ).startswith('mps' ):
A : List[Any] =torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
A : Union[str, Any] =torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
A : Optional[int] ={
'batch_size': 1,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> List[Any]:
A : Union[str, Any] ='cpu'
A : Tuple =self.get_dummy_components()
A : Union[str, Any] =self.pipeline_class(**SCREAMING_SNAKE_CASE__ )
pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
A : str =self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
A : str =pipe(**SCREAMING_SNAKE_CASE__ ).images
A : Optional[Any] =image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
A : Optional[Any] =np.array(
[1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04] )
A : str =np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(SCREAMING_SNAKE_CASE__ , 1e-3 )
def SCREAMING_SNAKE_CASE_ ( self : int ) -> Dict:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> List[Any]:
super().test_save_load_local(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Tuple:
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Tuple:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Dict:
A : Any ='google/ddpm-cifar10-32'
A : Optional[int] =UNetaDModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : Tuple =DDIMScheduler()
A : int =DDIMPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
ddim.to(SCREAMING_SNAKE_CASE__ )
ddim.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
A : Dict =torch.manual_seed(0 )
A : Optional[Any] =ddim(generator=SCREAMING_SNAKE_CASE__ , eta=0.0 , output_type='numpy' ).images
A : str =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A : Tuple =np.array([0.1_7_2_3, 0.1_6_1_7, 0.1_6_0_0, 0.1_6_2_6, 0.1_4_9_7, 0.1_5_1_3, 0.1_5_0_5, 0.1_4_4_2, 0.1_4_5_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> int:
A : Optional[int] ='google/ddpm-ema-bedroom-256'
A : str =UNetaDModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : str =DDIMScheduler.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : Tuple =DDIMPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
ddpm.to(SCREAMING_SNAKE_CASE__ )
ddpm.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
A : Any =torch.manual_seed(0 )
A : Optional[int] =ddpm(generator=SCREAMING_SNAKE_CASE__ , output_type='numpy' ).images
A : List[Any] =image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
A : Optional[int] =np.array([0.0_0_6_0, 0.0_2_0_1, 0.0_3_4_4, 0.0_0_2_4, 0.0_0_1_8, 0.0_0_0_2, 0.0_0_2_2, 0.0_0_0_0, 0.0_0_6_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 661 | 0 |
'''simple docstring'''
import os
from typing import Dict, List, Tuple, TypeVar, Union
_lowercase : Optional[Any] =TypeVar('''T''')
_lowercase : List[str] =Union[List[T], Tuple[T, ...]]
_lowercase : int =Union[T, List[T], Dict[str, T]]
_lowercase : List[str] =Union[str, bytes, os.PathLike]
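# Illustrative use of the alias pattern above (the obfuscated `_lowercase`
# names hide the original alias names, so this sketch rebinds its own):
T = TypeVar('T' )
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]


def _first_value(data: NestedDataStructureLike[int] ) -> int:
    # Tiny helper showing how the nested alias reads at a call site.
    if isinstance(data, dict ):
        return next(iter(data.values() ) )
    if isinstance(data, list ):
        return data[0]
    return data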
| 720 |
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Union[str, Any]:
A : Dict =tempfile.mkdtemp()
A : int =SamImageProcessor()
A : Union[str, Any] =SamProcessor(SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[int]:
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ).image_processor
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Any:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Optional[int]:
A : str =[np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
A : Optional[int] =[Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Tuple:
A : Optional[int] =SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A : str =self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 )
A : Union[str, Any] =SamProcessor.from_pretrained(self.tmpdirname , do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Optional[int]:
A : Optional[Any] =self.get_image_processor()
A : Optional[Any] =SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
A : Dict =self.prepare_image_inputs()
A : Optional[int] =image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='np' )
A : Optional[Any] =processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : int ) -> Any:
A : str =self.get_image_processor()
A : Union[str, Any] =SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
A : str =[torch.ones((1, 3, 5, 5) )]
A : Optional[Any] =[[17_64, 26_46]]
A : List[Any] =[[6_83, 10_24]]
A : Union[str, Any] =processor.post_process_masks(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
A : Any =processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , torch.tensor(SCREAMING_SNAKE_CASE__ ) , torch.tensor(SCREAMING_SNAKE_CASE__ ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
A : str =[np.ones((1, 3, 5, 5) )]
A : int =processor.post_process_masks(SCREAMING_SNAKE_CASE__ , np.array(SCREAMING_SNAKE_CASE__ ) , np.array(SCREAMING_SNAKE_CASE__ ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
A : Any =[[1, 0], [0, 1]]
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
A : Any =processor.post_process_masks(SCREAMING_SNAKE_CASE__ , np.array(SCREAMING_SNAKE_CASE__ ) , np.array(SCREAMING_SNAKE_CASE__ ) )
@require_vision
@require_tf
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( self : str ) -> str:
A : Tuple =tempfile.mkdtemp()
A : Union[str, Any] =SamImageProcessor()
A : Union[str, Any] =SamProcessor(SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : int , **SCREAMING_SNAKE_CASE__ : str ) -> Union[str, Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ).image_processor
def SCREAMING_SNAKE_CASE_ ( self : str ) -> List[str]:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Tuple:
A : Optional[Any] =[np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
A : Any =[Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> List[str]:
A : Optional[Any] =SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A : Optional[Any] =self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 )
A : Dict =SamProcessor.from_pretrained(self.tmpdirname , do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Any:
A : Any =self.get_image_processor()
A : Any =SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
A : int =self.prepare_image_inputs()
A : Tuple =image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='np' )
A : List[Any] =processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_tf
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Tuple:
A : int =self.get_image_processor()
A : Any =SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
A : Optional[int] =[tf.ones((1, 3, 5, 5) )]
A : Tuple =[[17_64, 26_46]]
A : Union[str, Any] =[[6_83, 10_24]]
A : int =processor.post_process_masks(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_tensors='tf' )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
A : List[Any] =processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ ) , tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ ) , return_tensors='tf' , )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
A : Any =[np.ones((1, 3, 5, 5) )]
A : Optional[Any] =processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , np.array(SCREAMING_SNAKE_CASE__ ) , np.array(SCREAMING_SNAKE_CASE__ ) , return_tensors='tf' )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
A : Any =[[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
A : List[str] =processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , np.array(SCREAMING_SNAKE_CASE__ ) , np.array(SCREAMING_SNAKE_CASE__ ) , return_tensors='tf' )
@require_vision
@require_torchvision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Union[str, Any]:
A : Optional[int] =tempfile.mkdtemp()
A : Union[str, Any] =SamImageProcessor()
A : Dict =SamProcessor(SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : int , **SCREAMING_SNAKE_CASE__ : List[str] ) -> Any:
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ).image_processor
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Any:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Tuple:
A : Any =[np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
A : Tuple =[Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> List[str]:
A : Optional[Any] =self.get_image_processor()
A : Dict =SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
A : Optional[int] =np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
A : Optional[int] =[tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ )]
A : Union[str, Any] =[torch.tensor(SCREAMING_SNAKE_CASE__ )]
A : int =[[17_64, 26_46]]
A : int =[[6_83, 10_24]]
A : Dict =processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_tensors='tf' )
A : Optional[Any] =processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Any:
A : Union[str, Any] =self.get_image_processor()
A : int =SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
A : int =self.prepare_image_inputs()
A : List[Any] =image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='pt' )['pixel_values'].numpy()
A : Tuple =processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )['pixel_values'].numpy()
A : Optional[int] =image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='tf' )['pixel_values'].numpy()
A : Dict =processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='tf' )['pixel_values'].numpy()
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
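# The post-processing contract exercised across the three test classes: low
# resolution masks of shape (1, 3, 5, 5) are upsampled back to the original
# image size, so with original_sizes [[1764, 2646]] the output masks have
# shape (1, 3, 1764, 2646); size lists whose shapes do not line up with the
# masks raise an error, and numpy, torch and tf inputs all agree.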
| 661 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase : Tuple ={
'''configuration_lilt''': ['''LILT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LiltConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[int] =[
'''LILT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LiltForQuestionAnswering''',
'''LiltForSequenceClassification''',
'''LiltForTokenClassification''',
'''LiltModel''',
'''LiltPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
_lowercase : Dict =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 721 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
_lowercase : Optional[Any] =WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN'''])
def A__ ( lowercase: Optional[int] ) -> Optional[int]:
A : str =test_results.split(' ' )
A : List[str] =0
A : Tuple =0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
A : List[str] =expressions[-2] if '=' in expressions[-1] else expressions[-1]
for i, expression in enumerate(lowercase ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
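# Worked example for the parser above: for a pytest summary such as
#   "== 2 failed, 30 passed in 12.34s =="
# splitting on spaces yields ['==', '2', 'failed,', '30', 'passed', 'in',
# '12.34s', '==']; the token before each 'failed'/'passed' supplies the
# counts, and since the last token contains '=', the time spent is taken
# from the second-to-last token, '12.34s'.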
def A__ ( lowercase: List[Any] ) -> str:
A : Union[str, Any] ={}
A : Optional[Any] =None
A : Union[str, Any] =False
for line in failures_short_lines.split('\n' ):
if re.search(r'_ \[doctest\]', lowercase ):
A : List[Any] =True
A : Any =line.split(' ' )[2]
elif in_error and not line.split(' ' )[0].isdigit():
A : Dict =line
A : List[str] =False
return failures
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
def __init__( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict ) -> List[str]:
A : Tuple =title
A : Dict =doc_test_results['time_spent'].split(',' )[0]
A : Union[str, Any] =doc_test_results['success']
A : Any =doc_test_results['failures']
A : Optional[Any] =self.n_success + self.n_failures
# Failures and success of the modeling tests
A : Union[str, Any] =doc_test_results
@property
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> str:
A : Any =[self._time_spent]
A : List[str] =0
for time in time_spent:
A : List[Any] =time.split(':' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(SCREAMING_SNAKE_CASE__ ) == 1:
A : List[str] =[0, 0, time_parts[0]]
A , A , A : Tuple =int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 36_00 + minutes * 60 + seconds
A , A , A : str =total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
return f'{int(SCREAMING_SNAKE_CASE__ )}h{int(SCREAMING_SNAKE_CASE__ )}m{int(SCREAMING_SNAKE_CASE__ )}s'
@property
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Dict:
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
f' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Dict:
A : Tuple =40
A : Optional[Any] ={k: v['failed'] for k, v in doc_test_results.items() if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}
A : Any =''
for category, failures in category_failures.items():
if len(SCREAMING_SNAKE_CASE__ ) == 0:
continue
if report != "":
report += "\n\n"
report += f'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(SCREAMING_SNAKE_CASE__ )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'The following examples had failures:\n\n\n{report}\n',
},
}
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> str:
A : Optional[int] =[self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(SCREAMING_SNAKE_CASE__ )
@staticmethod
def SCREAMING_SNAKE_CASE_ ( ) -> Optional[Any]:
A : Tuple =[
{
'type': 'section',
'text': {
'type': 'plain_text',
'text': 'There was an issue running the tests.',
},
'accessory': {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True},
'url': f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
]
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(SCREAMING_SNAKE_CASE__ )} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text='There was an issue running the tests.' , blocks=SCREAMING_SNAKE_CASE__ , )
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Optional[int]:
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(self.payload )} ) )
A : Any =f'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else 'All tests passed.'
A : Dict =client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , blocks=self.payload , text=SCREAMING_SNAKE_CASE__ , )
def SCREAMING_SNAKE_CASE_ ( self : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[int]:
A : List[str] =''
for key, value in failures.items():
A : Any =value[:2_00] + ' [Truncated]' if len(SCREAMING_SNAKE_CASE__ ) > 2_50 else value
failures_text += f'*{key}*\n_{value}_\n\n'
A : Union[str, Any] =job_name
A : Any ={'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}}
if job_link is not None:
A : int ={
'type': 'button',
'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True},
'url': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> List[Any]:
if self.thread_ts is None:
raise ValueError('Can only post reply if a post has been made.' )
A : Union[str, Any] =self.doc_test_results.pop('job_link' )
self.doc_test_results.pop('failures' )
self.doc_test_results.pop('success' )
self.doc_test_results.pop('time_spent' )
        A : Union[str, Any] =sorted(self.doc_test_results.items() , key=lambda t : t[0] )
for job, job_result in sorted_dict:
if len(job_result['failures'] ):
A : Any =f'*Num failures* :{len(job_result["failed"] )} \n'
A : List[Any] =job_result['failures']
A : Any =self.get_reply_blocks(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , text=SCREAMING_SNAKE_CASE__ )
print('Sending the following reply' )
print(json.dumps({'blocks': blocks} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text=f'Results for {job}' , blocks=SCREAMING_SNAKE_CASE__ , thread_ts=self.thread_ts['ts'] , )
time.sleep(1 )
def get_job_links():
    run_id =os.environ['GITHUB_RUN_ID']
    url =F'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'
    result =requests.get(url ).json()
    jobs ={}
    try:
        jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
        pages_to_iterate_over =math.ceil((result['total_count'] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result =requests.get(url + F'&page={i + 2}' ).json()
            jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
        return jobs
    except Exception as e:
        print('Unknown error, could not fetch links.', e )
        return {}
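# Shape of the mapping returned above (hypothetical values):
#   {'run_doctests': 'https://github.com/huggingface/transformers/actions/runs/<id>/jobs/<job_id>', ...}
# GitHub's jobs API pages at 100 entries per request, hence the extra
# `&page={i + 2}` fetches for workflows with more than 100 jobs.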
def retrieve_artifact( name: str ):
    _artifact ={}
    if os.path.exists(name ):
        files =os.listdir(name )
        for file in files:
            try:
                with open(os.path.join(name, file ), encoding='utf-8' ) as f:
                    _artifact[file.split('.' )[0]] =f.read()
            except UnicodeDecodeError as e:
                raise ValueError(F'Could not open {os.path.join(name, file )}.' ) from e
    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        '''simple docstring'''
        def __init__( self , name: str ):
            self.name =name
            self.paths =[]
        def __str__( self ):
            return self.name
        def add_path( self , path: str ):
            self.paths.append({'name': self.name, 'path': path} )
    _available_artifacts: Dict[str, Artifact] ={}
    directories =filter(os.path.isdir, os.listdir() )
    for directory in directories:
        artifact_name =directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] =Artifact(artifact_name )
        _available_artifacts[artifact_name].add_path(directory )
    return _available_artifacts
if __name__ == "__main__":
_lowercase : Optional[int] =get_job_links()
_lowercase : str =retrieve_available_artifacts()
_lowercase : List[Any] =collections.OrderedDict(
[
('''*.py''', '''API Examples'''),
('''*.md''', '''MD Examples'''),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
_lowercase : Optional[Any] ={
v: {
'''failed''': [],
'''failures''': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
_lowercase : List[Any] =github_actions_job_links.get('''run_doctests''')
_lowercase : int =available_artifacts['''doc_tests_gpu_test_reports'''].paths[0]
_lowercase : Dict =retrieve_artifact(artifact_path['''name'''])
if "stats" in artifact:
_lowercase , _lowercase , _lowercase : List[Any] =handle_test_results(artifact['''stats'''])
_lowercase : Any =failed
_lowercase : Union[str, Any] =success
_lowercase : str =time_spent[1:-1] + ''', '''
_lowercase : Any =extract_first_line_failure(artifact['''failures_short'''])
for line in artifact["summary_short"].split('''\n'''):
if re.search('''FAILED''', line):
_lowercase : Tuple =line.replace('''FAILED ''', '''''')
_lowercase : int =line.split()[0].replace('''\n''', '''''')
if "::" in line:
_lowercase , _lowercase : str =line.split('''::''')
else:
_lowercase , _lowercase : Union[str, Any] =line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
_lowercase : Any =docs[file_regex]
doc_test_results[category]["failed"].append(test)
_lowercase : Any =all_failures[test] if test in all_failures else '''N/A'''
_lowercase : Tuple =failure
break
_lowercase : Optional[int] =Message('''🤗 Results of the doc tests.''', doc_test_results)
message.post()
message.post_reply()
| 661 | 0 |
def A__ ( density: float, bulk_modulus: float ) -> float:
if density <= 0:
raise ValueError('Impossible fluid density' )
if bulk_modulus <= 0:
raise ValueError('Impossible bulk modulus' )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
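# Worked example (approximate handbook values, assuming water at ~20 °C with
# density 998 kg/m^3 and bulk modulus 2.15e9 Pa):
#   (2.15e9 / 998) ** 0.5 ≈ 1467.8 m/s, close to the measured speed of sound in water.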
| 700 |
_lowercase : Dict ='''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
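# A minimal, hypothetical training-loop sketch with the API re-exported above
# (model/optimizer/dataloader are placeholders the caller supplies):
#
#   accelerator =Accelerator()
#   model, optimizer, dataloader =accelerator.prepare(model, optimizer, dataloader )
#   for batch in dataloader:
#       loss =model(**batch ).loss
#       accelerator.backward(loss )
#       optimizer.step()
#       optimizer.zero_grad()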
| 661 | 0 |
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_000_000, n_limit: int = 10 ) -> int:
    count: defaultdict =defaultdict(int )
    for outer_width in range(3, (t_limit // 4) + 2 ):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound =max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ), 1 )
        else:
            hole_width_lower_bound =1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2 ):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit )
if __name__ == "__main__":
print(f'''{solution() = }''')
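# Optional brute-force cross-check for tiny limits (a sketch, not part of the
# original solution): enumerate every lamina directly and count tile totals.
def naive_solution(t_limit: int = 100 ) -> int:
    from collections import Counter
    counts: Counter =Counter()
    for outer in range(3, t_limit ):
        for hole in range(outer - 2, 0, -2 ):
            tiles =outer * outer - hole * hole
            if tiles > t_limit:
                break
            counts[tiles] += 1
    return sum(1 for n in counts.values() if 1 <= n <= 10 )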
| 701 |
from typing import List
from .keymap import KEYMAP, get_character
def mark( key: str ):
    def decorator( func ):
        handle =getattr(func, 'handle_key', [] )
        handle += [key]
        setattr(func, 'handle_key', handle )
        return func
    return decorator
def mark_multiple( *keys: List[str] ):
    def decorator( func ):
        handle =getattr(func, 'handle_key', [] )
        handle += keys
        setattr(func, 'handle_key', handle )
        return func
    return decorator
class KeyHandler(type ):
    '''simple docstring'''
    def __new__( cls , name , bases , attrs ):
        new_cls =super().__new__(cls , name , bases , attrs )
        if not hasattr(new_cls , 'key_handler' ):
            setattr(new_cls , 'key_handler' , {} )
        setattr(new_cls , 'handle_input' , KeyHandler.handle_input )
        for value in attrs.values():
            handled_keys =getattr(value , 'handle_key' , [] )
            for key in handled_keys:
                new_cls.key_handler[key] =value
        return new_cls
    @staticmethod
    def handle_input( cls ):
        char =get_character()
        if char != KEYMAP["undefined"]:
            char =ord(char )
        handler =cls.key_handler.get(char )
        if handler:
            cls.current_selection =char
            return handler(cls )
        else:
            return None
def register( cls ):
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy() )
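# A minimal usage sketch of the decorator + metaclass dispatch above
# (hypothetical demo class, not part of the original module):
@register
class _DemoMenu:
    @mark('q' )
    def quit( self ):
        return 'bye'

assert _DemoMenu.key_handler['q'](_DemoMenu() ) == 'bye'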
| 661 | 0 |
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
_lowercase : int =2
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
def __init__( self : List[Any] , *, # begin keyword-only arguments
SCREAMING_SNAKE_CASE__ : List[Any]="<s>" , SCREAMING_SNAKE_CASE__ : Optional[int]="<pad>" , SCREAMING_SNAKE_CASE__ : List[str]="</s>" , SCREAMING_SNAKE_CASE__ : Optional[Any]="<unk>" , SCREAMING_SNAKE_CASE__ : int=None , ) -> List[Any]:
A : Optional[Any] =bos, unk, pad, eos
A : Dict =[]
A : Union[str, Any] =[]
A : Any ={}
A : int =self.add_symbol(SCREAMING_SNAKE_CASE__ )
A : Any =self.add_symbol(SCREAMING_SNAKE_CASE__ )
A : List[Any] =self.add_symbol(SCREAMING_SNAKE_CASE__ )
A : List[str] =self.add_symbol(SCREAMING_SNAKE_CASE__ )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(SCREAMING_SNAKE_CASE__ )
A : List[str] =len(self.symbols )
def __eq__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> str:
return self.indices == other.indices
def __getitem__( self : int , SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[Any]:
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self : List[Any] ) -> Union[str, Any]:
return len(self.symbols )
def __contains__( self : Dict , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Tuple:
return sym in self.indices
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , SCREAMING_SNAKE_CASE__ : int ) -> Any:
A : Union[str, Any] =cls()
d.add_from_file(SCREAMING_SNAKE_CASE__ )
return d
def SCREAMING_SNAKE_CASE_ ( self : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any=1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=False ) -> Any:
if word in self.indices and not overwrite:
A : int =self.indices[word]
A : Union[str, Any] =self.count[idx] + n
return idx
else:
A : Tuple =len(self.symbols )
A : str =idx
self.symbols.append(SCREAMING_SNAKE_CASE__ )
self.count.append(SCREAMING_SNAKE_CASE__ )
return idx
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[Any]:
return 0
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : List[str] ) -> Optional[Any]:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
try:
with open(SCREAMING_SNAKE_CASE__ , 'r' , encoding='utf-8' ) as fd:
self.add_from_file(SCREAMING_SNAKE_CASE__ )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(SCREAMING_SNAKE_CASE__ ) )
return
A : str =f.readlines()
A : int =self._load_meta(SCREAMING_SNAKE_CASE__ )
for line in lines[indices_start_line:]:
try:
A : Optional[int] =line.rstrip().rsplit(' ' , 1 )
if field == "#fairseq:overwrite":
A : int =True
A : Optional[Any] =line.rsplit(' ' , 1 )
else:
A : Any =False
A : Tuple =int(SCREAMING_SNAKE_CASE__ )
A : Optional[int] =line
if word in self and not overwrite:
raise RuntimeError(
'Duplicate word found when loading Dictionary: \'{}\'. '
'Duplicate words can overwrite earlier ones by adding the '
'#fairseq:overwrite flag at the end of the corresponding row '
'in the dictionary file. If using the Camembert model, please '
'download an updated copy of the model file.'.format(SCREAMING_SNAKE_CASE__ ) )
self.add_symbol(SCREAMING_SNAKE_CASE__ , n=SCREAMING_SNAKE_CASE__ , overwrite=SCREAMING_SNAKE_CASE__ )
except ValueError:
raise ValueError('Incorrect dictionary format, expected \'<token> <cnt> [flags]\'' )
def A__ ( lowercase: Union[str, Any] ) -> str:
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
A : int =dict((re.sub(r'@@$', '', lowercase ), v) if k.endswith('@@' ) else (re.sub(r'$', '</w>', lowercase ), v) for k, v in d.items() )
A : int ='<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[F'{k}</w>']
A : List[Any] =d[k] # restore
return da
def A__ ( lowercase: Optional[int], lowercase: Optional[Any] ) -> str:
# prep
if not os.path.exists(lowercase ):
raise ValueError(F'path {biogpt_checkpoint_path} does not exist!' )
os.makedirs(lowercase, exist_ok=lowercase )
print(F'Writing results to {pytorch_dump_folder_path}' )
# handle various types of models
A : List[str] =os.path.join(lowercase, 'checkpoint.pt' )
if not os.path.isfile(lowercase ):
raise ValueError(F'path to the file {checkpoint_file} does not exist!' )
A : Optional[Any] =torch.load(lowercase, map_location='cpu' )
A : Any =chkpt['cfg']['model']
# dicts
A : Any =os.path.join(lowercase, 'dict.txt' )
if not os.path.isfile(lowercase ):
raise ValueError(F'path to the file {dict_file} does not exist!' )
A : Dict =Dictionary.load(lowercase )
A : Optional[Any] =rewrite_dict_keys(src_dict.indices )
A : Tuple =len(lowercase )
A : Any =os.path.join(lowercase, VOCAB_FILES_NAMES['vocab_file'] )
print(F'Generating {src_vocab_file} of {src_vocab_size} records' )
with open(lowercase, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(lowercase, ensure_ascii=lowercase, indent=lowercase ) )
# merges_file (bpecodes)
A : List[str] =os.path.join(lowercase, 'bpecodes' )
if not os.path.isfile(lowercase ):
raise ValueError(F'path to the file {bpecodes_file} does not exist!' )
A : List[str] =os.path.join(lowercase, VOCAB_FILES_NAMES['merges_file'] )
shutil.copyfile(lowercase, lowercase )
# model config
A : Tuple =os.path.join(lowercase, 'config.json' )
A : Tuple ={
'activation_dropout': args['activation_dropout'],
'architectures': ['BioGptForCausalLM'],
'attention_probs_dropout_prob': args['attention_dropout'],
'bos_token_id': 0,
'eos_token_id': 2,
'hidden_act': args['activation_fn'],
'hidden_dropout_prob': args['dropout'],
'hidden_size': args['decoder_embed_dim'],
'initializer_range': 0.02,
'intermediate_size': args['decoder_ffn_embed_dim'],
'layer_norm_eps': 1e-1_2,
'layerdrop': args['decoder_layerdrop'],
'max_position_embeddings': args['max_target_positions'],
'model_type': 'biogpt',
'num_attention_heads': args['decoder_attention_heads'],
'num_hidden_layers': args['decoder_layers'],
'pad_token_id': 1,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_decoder_input_output_embed'],
'vocab_size': src_vocab_size,
}
# good hparam defaults to start with
print(F'Generating {biogpt_model_config_file}' )
with open(lowercase, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(lowercase, ensure_ascii=lowercase, indent=lowercase ) )
# tokenizer config
A : int =os.path.join(lowercase, lowercase )
A : List[str] ={
'bos_token': '<s>',
'eos_token': '</s>',
'model_max_length': 1_024,
'pad_token': '<pad>',
'special_tokens_map_file': None,
'tokenizer_class': 'BioGptTokenizer',
'unk_token': '<unk>',
}
print(F'Generating {biogpt_tokenizer_config_file}' )
with open(lowercase, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(lowercase, ensure_ascii=lowercase, indent=lowercase ) )
# model
A : List[Any] =chkpt['model']
# remove unneeded keys
A : List[Any] =[
'decoder.version',
]
for k in ignore_keys:
model_state_dict.pop(lowercase, lowercase )
A : str =list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith('output_projection.weight' ):
A : Union[str, Any] =model_state_dict.pop(lowercase )
else:
A : List[str] =model_state_dict.pop(lowercase )
A : Any =BioGptConfig.from_pretrained(lowercase )
A : str =BioGptForCausalLM(lowercase )
# check that it loads ok
model_new.load_state_dict(lowercase )
# save
A : Tuple =os.path.join(lowercase, lowercase )
print(F'Generating {pytorch_weights_dump_path}' )
torch.save(lowercase, lowercase )
print('Conversion is done!' )
if __name__ == "__main__":
_lowercase : Union[str, Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--biogpt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_lowercase : List[Any] =parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
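# Hypothetical invocation (paths are placeholders for a local fairseq dump and
# an output directory; the script name follows the transformers convention):
#   python convert_biogpt_original_pytorch_checkpoint_to_pytorch.py \
#       --biogpt_checkpoint_path ./biogpt_checkpoint \
#       --pytorch_dump_folder_path ./biogpt_hf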
| 702 |
import math
def prime_sieve( n: int ) -> list:
    is_prime =[True] * n
    is_prime[0] =False
    is_prime[1] =False
    is_prime[2] =True
    for i in range(3, int(n**0.5 + 1 ), 2 ):
        index =i * 2
        while index < n:
            is_prime[index] =False
            index =index + i
    primes =[2]
    for i in range(3, n, 2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
def solution( limit: int = 999_966_663_333 ) -> int:
    primes_upper_bound =math.floor(math.sqrt(limit ) ) + 100
    primes =prime_sieve(primes_upper_bound )
    matches_sum =0
    prime_index =0
    last_prime =primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime =primes[prime_index + 1]
        lower_bound =last_prime**2
        upper_bound =next_prime**2
        # Get numbers divisible by lps(current)
        current =lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current =upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current =0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime =next_prime
        prime_index += 1
    return matches_sum
if __name__ == "__main__":
print(solution())
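# Brute-force cross-check for small limits (a sketch of the Project Euler 234
# definition: n is semidivisible when exactly one of lps(n), the largest prime
# <= sqrt(n), and ups(n), the smallest prime >= sqrt(n), divides n):
def naive_semidivisible_sum( limit: int ) -> int:
    def is_prime( k: int ) -> bool:
        return k >= 2 and all(k % d for d in range(2, int(k**0.5 ) + 1 ) )
    total =0
    for n in range(4, limit + 1 ):
        root =n**0.5
        lps =int(root )
        while not is_prime(lps ):
            lps -= 1
        ups =int(root ) if root == int(root ) else int(root ) + 1
        while not is_prime(ups ):
            ups += 1
        if (n % lps == 0) != (n % ups == 0):
            total += n
    return total

# naive_semidivisible_sum(15) == 30, matching the example in the problem statement.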
| 661 | 0 |
def lucas_lehmer_test( p: int ) -> bool:
    if p < 2:
        raise ValueError('p should not be less than 2!' )
    elif p == 2:
        return True
    s =4
    m =(1 << p) - 1
    for _ in range(p - 2 ):
        s =((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(1_1))
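# Usage sketch: for prime exponents p the test decides whether the Mersenne
# number 2**p - 1 is prime, e.g.
#   [p for p in (3, 5, 7, 11, 13, 17, 19, 23, 29, 31) if lucas_lehmer_test(p )]
#   -> [3, 5, 7, 13, 17, 19, 31]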
| 703 |
import heapq
def greedy_min_vertex_cover( graph: dict ) -> set[int]:
    queue: list[list] =[]
    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value ), (key, value)] )
    # chosen_vertices = set of chosen vertices
    chosen_vertices =set()
    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax =heapq.heappop(queue )[1][0]
        chosen_vertices.add(argmax )
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index =elem[1][1].index(argmax )
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue )
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph ={0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
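# Quick sanity check (a sketch): a valid cover must touch every edge. Note the
# greedy routine mutates the adjacency lists it is given, hence the copy below.
def is_vertex_cover( adjacency: dict, cover: set ) -> bool:
    return all(u in cover or v in cover for u in adjacency for v in adjacency[u] )

if __name__ == "__main__":
    _sample ={k: list(v ) for k, v in graph.items()}
    assert is_vertex_cover(graph, greedy_min_vertex_cover(_sample ) )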
| 661 | 0 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
lowercase : List[str]
lowercase : Optional[str] = None
# Automatically constructed
lowercase : ClassVar[str] = "dict"
lowercase : ClassVar[Any] = None
lowercase : str = field(default="Translation" , init=lowerCAmelCase_ , repr=lowerCAmelCase_ )
def __call__( self : Optional[int] ) -> Any:
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return {k: Value('string' ) for k in sorted(self.languages )}
@dataclass
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
lowercase : Optional[List] = None
lowercase : Optional[int] = None
lowercase : Optional[str] = None
# Automatically constructed
lowercase : ClassVar[str] = "dict"
lowercase : ClassVar[Any] = None
lowercase : str = field(default="TranslationVariableLanguages" , init=lowerCAmelCase_ , repr=lowerCAmelCase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Union[str, Any]:
A : Tuple =sorted(set(self.languages ) ) if self.languages else None
A : Optional[Any] =len(self.languages ) if self.languages else None
def __call__( self : Optional[int] ) -> Dict:
return pa.struct({'language': pa.list_(pa.string() ), 'translation': pa.list_(pa.string() )} )
def SCREAMING_SNAKE_CASE_ ( self : str , SCREAMING_SNAKE_CASE__ : Tuple ) -> List[str]:
A : Tuple =set(self.languages )
if self.languages and set(SCREAMING_SNAKE_CASE__ ) - lang_set:
raise ValueError(
f'Some languages in example ({", ".join(sorted(set(SCREAMING_SNAKE_CASE__ ) - lang_set ) )}) are not in valid set ({", ".join(SCREAMING_SNAKE_CASE__ )}).' )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
A : List[str] =[]
for lang, text in translation_dict.items():
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
A : Any =zip(*sorted(SCREAMING_SNAKE_CASE__ ) )
return {"language": languages, "translation": translations}
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Sequence, Value
return {
"language": Sequence(Value('string' ) ),
"translation": Sequence(Value('string' ) ),
}
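# Usage sketch against the upstream `datasets` API (the un-obfuscated class
# names are Translation / TranslationVariableLanguages; illustrative only):
#
#   from datasets import Dataset, Features
#   from datasets.features import Translation
#   features =Features({'translation': Translation(languages=['en', 'fr'] )} )
#   ds =Dataset.from_dict({'translation': [{'en': 'the cat', 'fr': 'le chat'}]}, features=features )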
| 704 |
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
_lowercase : List[Any] =logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : float , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> int:
A : Tuple =feature_size
A : int =sampling_rate
A : List[str] =padding_value
A : Tuple =kwargs.pop('padding_side' , 'right' )
A : str =kwargs.pop('return_attention_mask' , SCREAMING_SNAKE_CASE__ )
super().__init__(**SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : str , SCREAMING_SNAKE_CASE__ : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , SCREAMING_SNAKE_CASE__ : Union[bool, str, PaddingStrategy] = True , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[str, TensorType]] = None , ) -> BatchFeature:
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(SCREAMING_SNAKE_CASE__ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
A : Tuple ={
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
'You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'
f' to this method that includes {self.model_input_names[0]}, but you provided'
f' {list(processed_features.keys() )}' )
A : Dict =processed_features[self.model_input_names[0]]
A : int =(
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(SCREAMING_SNAKE_CASE__ ) == 0:
if return_attention_mask:
A : List[Any] =[]
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
A : List[str] =required_input[0]
if isinstance(SCREAMING_SNAKE_CASE__ , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
A : Any =0
while len(required_input[index] ) == 0:
index += 1
if index < len(SCREAMING_SNAKE_CASE__ ):
A : Dict =required_input[index][0]
if return_tensors is None:
if is_tf_tensor(SCREAMING_SNAKE_CASE__ ):
A : List[Any] ='tf'
elif is_torch_tensor(SCREAMING_SNAKE_CASE__ ):
A : Optional[int] ='pt'
elif isinstance(SCREAMING_SNAKE_CASE__ , (int, float, list, tuple, np.ndarray) ):
A : Union[str, Any] ='np'
else:
raise ValueError(
f'type of {first_element} unknown: {type(SCREAMING_SNAKE_CASE__ )}. '
'Should be one of a python, numpy, pytorch or tensorflow object.' )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
A : int =to_numpy(SCREAMING_SNAKE_CASE__ )
else:
A : List[Any] =[to_numpy(SCREAMING_SNAKE_CASE__ ) for v in value]
# Convert padding_strategy in PaddingStrategy
A : List[Any] =self._get_padding_strategies(padding=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ )
A : Optional[int] =processed_features[self.model_input_names[0]]
A : List[str] =len(SCREAMING_SNAKE_CASE__ )
if not all(len(SCREAMING_SNAKE_CASE__ ) == batch_size for v in processed_features.values() ):
raise ValueError('Some items in the output dictionary have a different batch size than others.' )
A : Tuple =[]
for i in range(SCREAMING_SNAKE_CASE__ ):
A : int ={k: v[i] for k, v in processed_features.items()}
# truncation
A : List[Any] =self._truncate(
SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , pad_to_multiple_of=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , )
truncated_inputs.append(SCREAMING_SNAKE_CASE__ )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
A : Any =max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
A : Optional[Any] =PaddingStrategy.MAX_LENGTH
A : List[Any] ={}
for i in range(SCREAMING_SNAKE_CASE__ ):
# padding
A : Optional[Any] =self._pad(
truncated_inputs[i] , max_length=SCREAMING_SNAKE_CASE__ , padding_strategy=SCREAMING_SNAKE_CASE__ , pad_to_multiple_of=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , )
for key, value in outputs.items():
if key not in batch_outputs:
A : Dict =[]
if value.dtype is np.dtype(np.floataa ):
A : Tuple =value.astype(np.floataa )
batch_outputs[key].append(SCREAMING_SNAKE_CASE__ )
return BatchFeature(SCREAMING_SNAKE_CASE__ , tensor_type=SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : Union[Dict[str, np.ndarray], BatchFeature] , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , ) -> dict:
A : Optional[int] =processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
A : List[str] =len(SCREAMING_SNAKE_CASE__ )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
A : Tuple =((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
A : int =padding_strategy != PaddingStrategy.DO_NOT_PAD and len(SCREAMING_SNAKE_CASE__ ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
A : str =np.ones(len(SCREAMING_SNAKE_CASE__ ) , dtype=np.intaa )
if needs_to_be_padded:
A : Union[str, Any] =max_length - len(SCREAMING_SNAKE_CASE__ )
if self.padding_side == "right":
if return_attention_mask:
A : Dict =np.pad(
processed_features['attention_mask'] , (0, difference) )
A : str =((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
A : Tuple =np.pad(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'constant' , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
A : List[Any] =np.pad(
processed_features['attention_mask'] , (difference, 0) )
A : Union[str, Any] =((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
A : Tuple =np.pad(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'constant' , constant_values=self.padding_value )
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return processed_features
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : Union[Dict[str, np.ndarray], BatchFeature] , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , ) -> Optional[Any]:
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError('When setting ``truncation=True``, make sure that ``max_length`` is defined.' )
A : Tuple =processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
A : Any =((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
A : List[str] =len(SCREAMING_SNAKE_CASE__ ) > max_length
if needs_to_be_truncated:
A : Union[str, Any] =processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
A : Dict =processed_features['attention_mask'][:max_length]
return processed_features
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any=False , SCREAMING_SNAKE_CASE__ : Dict=None ) -> Union[str, Any]:
# Get padding strategy
if padding is not False:
if padding is True:
A : List[Any] =PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
A : Tuple =PaddingStrategy(SCREAMING_SNAKE_CASE__ )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
A : Optional[int] =padding
else:
A : List[str] =PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f'When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined' )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
'Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'
' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.' )
return padding_strategy
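# A self-contained numpy sketch of the right-padding branch above (hypothetical
# helper; `padding_value` plays the role of the extractor's attribute):
import numpy as np

def _pad_right( seq , max_length , padding_value=0.0 ):
    difference =max_length - len(seq )
    attention_mask =np.pad(np.ones(len(seq ), dtype=np.int32 ), (0, difference) )
    padded =np.pad(seq, (0, difference), 'constant', constant_values=padding_value )
    return padded, attention_mask

# _pad_right(np.array([0.1, 0.2, 0.3] ), 5) -> ([0.1, 0.2, 0.3, 0.0, 0.0], [1, 1, 1, 0, 0])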
| 661 | 0 |
import os
from collections.abc import Iterator
def good_file_paths( top_dir: str = "." ) -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir ):
        dir_names[:] =[d for d in dir_names if d != 'scripts' and d[0] not in '._']
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename )[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename ).lstrip('./' )
def md_prefix( i ):
    return F'{i * "  "}*' if i else "\n##"
def print_path( old_path: str, new_path: str ) -> str:
    old_parts =old_path.split(os.sep )
    for i, new_part in enumerate(new_path.split(os.sep ) ):
        if (i + 1 > len(old_parts ) or old_parts[i] != new_part) and new_part:
            print(F'{md_prefix(i )} {new_part.replace("_", " " ).title()}' )
    return new_path
def print_directory_md( top_dir: str = "." ) -> None:
    old_path =''
    for filepath in sorted(good_file_paths(top_dir ) ):
        filepath, filename =os.path.split(filepath )
        if filepath != old_path:
            old_path =print_path(old_path, filepath )
        indent =(filepath.count(os.sep ) + 1) if filepath else 0
        url =F'{filepath}/{filename}'.replace(' ', '%20' )
        filename =os.path.splitext(filename.replace('_', ' ' ).title() )[0]
        print(F'{md_prefix(indent )} [{filename}]({url})' )
if __name__ == "__main__":
print_directory_md('''.''')
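# Shape of the emitted index (hypothetical files): a section header per
# directory, then one bullet per file, e.g.
#
#   ## Sorts
#     * [Bubble Sort](sorts/bubble_sort.py)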
| 705 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
_lowercase : Optional[int] =logging.get_logger(__name__)
_lowercase : List[str] ={
'''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'''
),
'''microsoft/deberta-v2-xxlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
lowercase : int = "deberta-v2"
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : str=12_81_00 , SCREAMING_SNAKE_CASE__ : List[Any]=15_36 , SCREAMING_SNAKE_CASE__ : Dict=24 , SCREAMING_SNAKE_CASE__ : List[str]=24 , SCREAMING_SNAKE_CASE__ : List[str]=61_44 , SCREAMING_SNAKE_CASE__ : List[Any]="gelu" , SCREAMING_SNAKE_CASE__ : int=0.1 , SCREAMING_SNAKE_CASE__ : Any=0.1 , SCREAMING_SNAKE_CASE__ : List[str]=5_12 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0 , SCREAMING_SNAKE_CASE__ : Tuple=0.0_2 , SCREAMING_SNAKE_CASE__ : List[Any]=1e-7 , SCREAMING_SNAKE_CASE__ : Optional[int]=False , SCREAMING_SNAKE_CASE__ : Tuple=-1 , SCREAMING_SNAKE_CASE__ : List[Any]=0 , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : List[str]=0 , SCREAMING_SNAKE_CASE__ : List[str]="gelu" , **SCREAMING_SNAKE_CASE__ : Dict , ) -> Dict:
super().__init__(**SCREAMING_SNAKE_CASE__ )
A : Dict =hidden_size
A : Optional[Any] =num_hidden_layers
A : Optional[int] =num_attention_heads
A : Optional[int] =intermediate_size
A : Any =hidden_act
A : Any =hidden_dropout_prob
A : Union[str, Any] =attention_probs_dropout_prob
A : Optional[Any] =max_position_embeddings
A : Tuple =type_vocab_size
A : Tuple =initializer_range
A : int =relative_attention
A : int =max_relative_positions
A : Optional[Any] =pad_token_id
A : Union[str, Any] =position_biased_input
# Backwards compatibility
if type(SCREAMING_SNAKE_CASE__ ) == str:
A : Any =[x.strip() for x in pos_att_type.lower().split('|' )]
A : Any =pos_att_type
A : Tuple =vocab_size
A : Any =layer_norm_eps
A : Optional[Any] =kwargs.get('pooler_hidden_size' , SCREAMING_SNAKE_CASE__ )
A : str =pooler_dropout
A : Any =pooler_hidden_act
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
@property
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
A : List[Any] ={0: 'batch', 1: 'choice', 2: 'sequence'}
else:
A : int ={0: 'batch', 1: 'sequence'}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)] )
else:
return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)] )
@property
def SCREAMING_SNAKE_CASE_ ( self : int ) -> int:
return 12
def SCREAMING_SNAKE_CASE_ ( self : str , SCREAMING_SNAKE_CASE__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : Optional["TensorType"] = None , SCREAMING_SNAKE_CASE__ : int = 3 , SCREAMING_SNAKE_CASE__ : int = 40 , SCREAMING_SNAKE_CASE__ : int = 40 , SCREAMING_SNAKE_CASE__ : "PreTrainedTokenizerBase" = None , ) -> Mapping[str, Any]:
A : str =super().generate_dummy_inputs(preprocessor=SCREAMING_SNAKE_CASE__ , framework=SCREAMING_SNAKE_CASE__ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
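# Usage sketch (assumes `transformers` is installed; the upstream class name is
# DebertaV2Config rather than the obfuscated one above):
#
#   from transformers import DebertaV2Config
#   config =DebertaV2Config(hidden_size=15_36, num_hidden_layers=24 )
#   config.model_type  # 'deberta-v2'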
| 661 | 0 |
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def A__ ( lowercase: int, lowercase: str ) -> Dict:
assert isinstance(lowercase, lowercase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory', [False, True] )
def A__ ( lowercase: Dict, lowercase: Tuple, lowercase: str ) -> str:
A : Any =tmp_path / 'cache'
A : Dict ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A : Dict =JsonDatasetReader(lowercase, cache_dir=lowercase, keep_in_memory=lowercase ).read()
_check_json_dataset(lowercase, lowercase )
@pytest.mark.parametrize(
'features', [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
], )
def A__ ( lowercase: Optional[int], lowercase: Any, lowercase: Union[str, Any] ) -> Tuple:
A : Tuple =tmp_path / 'cache'
A : Optional[Any] ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A : Optional[Any] =features.copy() if features else default_expected_features
A : Union[str, Any] =(
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
A : str =JsonDatasetReader(lowercase, features=lowercase, cache_dir=lowercase ).read()
_check_json_dataset(lowercase, lowercase )
@pytest.mark.parametrize(
'features', [
None,
{'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'},
], )
def A__ ( lowercase: Optional[int], lowercase: str, lowercase: Dict ) -> Optional[int]:
A : int =tmp_path / 'cache'
A : Tuple ={'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'}
A : int =features.copy() if features else default_expected_features
A : str =(
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
A : Optional[int] =JsonDatasetReader(lowercase, features=lowercase, cache_dir=lowercase ).read()
assert isinstance(lowercase, lowercase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def A__ ( lowercase: Optional[Any], lowercase: str ) -> Tuple:
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
A : str ={'col_2': 'int64', 'col_3': 'float64', 'col_1': 'string'}
A : Dict =features.copy()
A : List[str] =(
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
A : int =tmp_path / 'cache'
A : Optional[int] =JsonDatasetReader(lowercase, features=lowercase, cache_dir=lowercase ).read()
assert isinstance(lowercase, lowercase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('split', [None, NamedSplit('train' ), 'train', 'test'] )
def A__ ( lowercase: Union[str, Any], lowercase: Any, lowercase: str ) -> Optional[Any]:
A : Optional[int] =tmp_path / 'cache'
A : Optional[Any] ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A : str =JsonDatasetReader(lowercase, cache_dir=lowercase, split=lowercase ).read()
_check_json_dataset(lowercase, lowercase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type', [str, list] )
def A__ ( lowercase: Optional[Any], lowercase: int, lowercase: Union[str, Any] ) -> List[Any]:
if issubclass(lowercase, lowercase ):
A : int =jsonl_path
elif issubclass(lowercase, lowercase ):
A : Any =[jsonl_path]
A : Optional[Any] =tmp_path / 'cache'
A : Tuple ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A : List[str] =JsonDatasetReader(lowercase, cache_dir=lowercase ).read()
_check_json_dataset(lowercase, lowercase )
def A__ ( lowercase: List[str], lowercase: Tuple, lowercase: Optional[Any]=("train",) ) -> Tuple:
assert isinstance(lowercase, lowercase )
for split in splits:
A : List[str] =dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory', [False, True] )
def A__ ( lowercase: Tuple, lowercase: Optional[int], lowercase: Any ) -> str:
A : List[str] =tmp_path / 'cache'
A : Union[str, Any] ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A : str =JsonDatasetReader({'train': jsonl_path}, cache_dir=lowercase, keep_in_memory=lowercase ).read()
_check_json_datasetdict(lowercase, lowercase )
@pytest.mark.parametrize(
'features', [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
], )
def A__ ( lowercase: Optional[int], lowercase: Optional[int], lowercase: Optional[int] ) -> Tuple:
A : Any =tmp_path / 'cache'
A : List[str] ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A : str =features.copy() if features else default_expected_features
A : Dict =(
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
A : Optional[Any] =JsonDatasetReader({'train': jsonl_path}, features=lowercase, cache_dir=lowercase ).read()
_check_json_datasetdict(lowercase, lowercase )
@pytest.mark.parametrize('split', [None, NamedSplit('train' ), 'train', 'test'] )
def A__ ( lowercase: Any, lowercase: List[Any], lowercase: List[Any] ) -> Tuple:
if split:
A : Optional[int] ={split: jsonl_path}
else:
A : Dict ='train'
A : Optional[Any] ={'train': jsonl_path, 'test': jsonl_path}
A : Tuple =tmp_path / 'cache'
A : List[str] ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A : List[Any] =JsonDatasetReader(lowercase, cache_dir=lowercase ).read()
_check_json_datasetdict(lowercase, lowercase, splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def A__ ( lowercase: List[Any] ) -> Tuple:
return json.load(lowercase )
def A__ ( lowercase: List[Any] ) -> Tuple:
return [json.loads(lowercase ) for line in buffer]
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Any:
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , lines=SCREAMING_SNAKE_CASE__ ).write()
buffer.seek(0 )
A : int =load_json_function(SCREAMING_SNAKE_CASE__ )
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert isinstance(exported_content[0] , SCREAMING_SNAKE_CASE__ )
assert len(SCREAMING_SNAKE_CASE__ ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any ) -> Optional[Any]:
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , lines=SCREAMING_SNAKE_CASE__ , orient=SCREAMING_SNAKE_CASE__ ).write()
buffer.seek(0 )
A : Any =load_json(SCREAMING_SNAKE_CASE__ )
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(SCREAMING_SNAKE_CASE__ , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(SCREAMING_SNAKE_CASE__ ) == 10
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[int]:
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , lines=SCREAMING_SNAKE_CASE__ , num_proc=2 ).write()
buffer.seek(0 )
A : int =load_json_function(SCREAMING_SNAKE_CASE__ )
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert isinstance(exported_content[0] , SCREAMING_SNAKE_CASE__ )
assert len(SCREAMING_SNAKE_CASE__ ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[Any]:
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , lines=SCREAMING_SNAKE_CASE__ , orient=SCREAMING_SNAKE_CASE__ , num_proc=2 ).write()
buffer.seek(0 )
A : List[Any] =load_json(SCREAMING_SNAKE_CASE__ )
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(SCREAMING_SNAKE_CASE__ , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(SCREAMING_SNAKE_CASE__ ) == 10
def SCREAMING_SNAKE_CASE_ ( self : str , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> List[Any]:
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , num_proc=0 )
@pytest.mark.parametrize('compression, extension' , [('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')] )
def SCREAMING_SNAKE_CASE_ ( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict ) -> str:
A : Union[str, Any] =tmp_path_factory.mktemp('data' ) / f'test.json.{extension}'
A : Union[str, Any] =str(shared_datadir / f'test_file.json.{extension}' )
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , compression=SCREAMING_SNAKE_CASE__ ).write()
with fsspec.open(SCREAMING_SNAKE_CASE__ , 'rb' , compression='infer' ) as f:
A : str =f.read()
with fsspec.open(SCREAMING_SNAKE_CASE__ , 'rb' , compression='infer' ) as f:
A : List[str] =f.read()
assert exported_content == original_content
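# Minimal in-memory round-trip sketch with the same internal writer (assumes a
# literal dataset in place of the test fixtures; output shown is approximate):
#
#   import io
#   from datasets import Dataset
#   from datasets.io.json import JsonDatasetWriter
#   ds =Dataset.from_dict({'col_1': ['a', 'b'], 'col_2': [1, 2]} )
#   buffer =io.BytesIO()
#   JsonDatasetWriter(ds, buffer, lines=True ).write()
#   buffer.getvalue().splitlines()[0]  # a JSON record such as {"col_1":"a","col_2":1}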
| 706 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
lowercase : Tuple = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]
@register_to_config
def __init__( self : Any , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : int = 5_02_57 , SCREAMING_SNAKE_CASE__ : int = 10_24 , SCREAMING_SNAKE_CASE__ : int = 7_68 , SCREAMING_SNAKE_CASE__ : int = 12 , SCREAMING_SNAKE_CASE__ : int = 12 , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : str = "gelu_new" , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 1e-5 , SCREAMING_SNAKE_CASE__ : float = 0.0_2 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , ) -> List[str]:
super().__init__()
A : str =prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f'`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and'
f' `n_embd`: {n_embd} are not equal.' )
A : List[Any] =prefix_inner_dim
A : Dict =prefix_hidden_dim
A : List[str] =(
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
A : Optional[int] =(
nn.Linear(self.prefix_hidden_dim , SCREAMING_SNAKE_CASE__ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
A : Dict =GPTaConfig(
vocab_size=SCREAMING_SNAKE_CASE__ , n_positions=SCREAMING_SNAKE_CASE__ , n_embd=SCREAMING_SNAKE_CASE__ , n_layer=SCREAMING_SNAKE_CASE__ , n_head=SCREAMING_SNAKE_CASE__ , n_inner=SCREAMING_SNAKE_CASE__ , activation_function=SCREAMING_SNAKE_CASE__ , resid_pdrop=SCREAMING_SNAKE_CASE__ , embd_pdrop=SCREAMING_SNAKE_CASE__ , attn_pdrop=SCREAMING_SNAKE_CASE__ , layer_norm_epsilon=SCREAMING_SNAKE_CASE__ , initializer_range=SCREAMING_SNAKE_CASE__ , scale_attn_weights=SCREAMING_SNAKE_CASE__ , use_cache=SCREAMING_SNAKE_CASE__ , scale_attn_by_inverse_layer_idx=SCREAMING_SNAKE_CASE__ , reorder_and_upcast_attn=SCREAMING_SNAKE_CASE__ , )
A : Dict =GPTaLMHeadModel(SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE__ : torch.Tensor , SCREAMING_SNAKE_CASE__ : torch.Tensor , SCREAMING_SNAKE_CASE__ : Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE__ : Optional[torch.Tensor] = None , ) -> Optional[Any]:
A : str =self.transformer.transformer.wte(SCREAMING_SNAKE_CASE__ )
A : Any =self.encode_prefix(SCREAMING_SNAKE_CASE__ )
A : Optional[Any] =self.decode_prefix(SCREAMING_SNAKE_CASE__ )
A : Optional[int] =torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
A : int =self.get_dummy_token(input_ids.shape[0] , input_ids.device )
A : Optional[int] =torch.cat((dummy_token, input_ids) , dim=1 )
A : Dict =self.transformer(inputs_embeds=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def SCREAMING_SNAKE_CASE_ ( self : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : torch.device ) -> torch.Tensor:
return torch.zeros(SCREAMING_SNAKE_CASE__ , self.prefix_length , dtype=torch.intaa , device=SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[str]:
return self.encode_prefix(SCREAMING_SNAKE_CASE__ )
@torch.no_grad()
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] , features : List[str] , eos_token_id : Union[str, Any] , device : Any ) -> Dict:
        features =torch.split(features , 1 , dim=0 )
        generated_tokens =[]
        generated_seq_lengths =[]
        for feature in features:
            feature =self.decode_prefix(feature.to(device ) ) # back to the clip feature
            # Only support beam search for now
            output_tokens , seq_lengths =self.generate_beam(
                input_embeds=feature , device=device , eos_token_id=eos_token_id )
            generated_tokens.append(output_tokens[0] )
            generated_seq_lengths.append(seq_lengths[0] )
        generated_tokens =torch.stack(generated_tokens )
        generated_seq_lengths =torch.stack(generated_seq_lengths )
        return generated_tokens, generated_seq_lengths
@torch.no_grad()
    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , input_ids : Union[str, Any]=None , input_embeds : List[Any]=None , device : Tuple=None , beam_size : int = 5 , entry_length : int = 67 , temperature : float = 1.0 , eos_token_id : Optional[int] = None , ) -> Dict:
        stop_token_index =eos_token_id
        tokens =None
        scores =None
        seq_lengths =torch.ones(beam_size , device=device , dtype=torch.int )
        is_stopped =torch.zeros(beam_size , device=device , dtype=torch.bool )
        if input_embeds is not None:
            generated =input_embeds
        else:
            generated =self.transformer.transformer.wte(input_ids )
        for i in range(entry_length ):
            outputs =self.transformer(inputs_embeds=generated )
            logits =outputs.logits
            logits =logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits =logits.softmax(-1 ).log()
            if scores is None:
                scores , next_tokens =logits.topk(beam_size , -1 )
                generated =generated.expand(beam_size , *generated.shape[1:] )
                next_tokens , scores =next_tokens.permute(1 , 0 ), scores.squeeze(0 )
                if tokens is None:
                    tokens =next_tokens
                else:
                    tokens =tokens.expand(beam_size , *tokens.shape[1:] )
                    tokens =torch.cat((tokens, next_tokens) , dim=1 )
            else:
                logits[is_stopped] =-float(np.inf )
                logits[is_stopped, 0] =0
                scores_sum =scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average =scores_sum / seq_lengths[:, None]
                scores_sum_average , next_tokens =scores_sum_average.view(-1 ).topk(beam_size , -1 )
                next_tokens_source =next_tokens // scores_sum.shape[1]
                seq_lengths =seq_lengths[next_tokens_source]
                next_tokens =next_tokens % scores_sum.shape[1]
                next_tokens =next_tokens.unsqueeze(1 )
                tokens =tokens[next_tokens_source]
                tokens =torch.cat((tokens, next_tokens) , dim=1 )
                generated =generated[next_tokens_source]
                scores =scores_sum_average * seq_lengths
                is_stopped =is_stopped[next_tokens_source]
            next_token_embed =self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
            generated =torch.cat((generated, next_token_embed) , dim=1 )
            is_stopped =is_stopped + next_tokens.eq(stop_token_index ).squeeze()
            if is_stopped.all():
                break
        scores =scores / seq_lengths
        order =scores.argsort(descending=True )
        # tokens tensors are already padded to max_seq_length
        output_texts =[tokens[i] for i in order]
        output_texts =torch.stack(output_texts , dim=0 )
        seq_lengths =torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
        return output_texts, seq_lengths
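# A hedged usage sketch (an addition, not part of the original module). It assumes a
# CLIP-style feature tensor `features` of shape (batch, prefix_length, prefix_inner_dim)
# and GPT-2's end-of-text id 50256 for `eos_token_id`; the method name
# `generate_captions` is the original one, used here for readability:
#
# decoder = SCREAMING_SNAKE_CASE_(prefix_length=77, prefix_inner_dim=768)
# tokens, lengths = decoder.generate_captions(features, 50256, device='cpu')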
| 661 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Aliased so the SeqaSeq-style name used below resolves against the real auto class.
from ..models.auto import AutoModelForSeq2SeqLM as AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES ={
'''Acehnese Arabic''': '''ace_Arab''',
'''Acehnese Latin''': '''ace_Latn''',
'''Mesopotamian Arabic''': '''acm_Arab''',
'''Ta\'izzi-Adeni Arabic''': '''acq_Arab''',
'''Tunisian Arabic''': '''aeb_Arab''',
'''Afrikaans''': '''afr_Latn''',
'''South Levantine Arabic''': '''ajp_Arab''',
'''Akan''': '''aka_Latn''',
'''Amharic''': '''amh_Ethi''',
'''North Levantine Arabic''': '''apc_Arab''',
'''Modern Standard Arabic''': '''arb_Arab''',
'''Modern Standard Arabic Romanized''': '''arb_Latn''',
'''Najdi Arabic''': '''ars_Arab''',
'''Moroccan Arabic''': '''ary_Arab''',
'''Egyptian Arabic''': '''arz_Arab''',
'''Assamese''': '''asm_Beng''',
'''Asturian''': '''ast_Latn''',
'''Awadhi''': '''awa_Deva''',
'''Central Aymara''': '''ayr_Latn''',
'''South Azerbaijani''': '''azb_Arab''',
'''North Azerbaijani''': '''azj_Latn''',
'''Bashkir''': '''bak_Cyrl''',
'''Bambara''': '''bam_Latn''',
'''Balinese''': '''ban_Latn''',
'''Belarusian''': '''bel_Cyrl''',
'''Bemba''': '''bem_Latn''',
'''Bengali''': '''ben_Beng''',
'''Bhojpuri''': '''bho_Deva''',
'''Banjar Arabic''': '''bjn_Arab''',
'''Banjar Latin''': '''bjn_Latn''',
'''Standard Tibetan''': '''bod_Tibt''',
'''Bosnian''': '''bos_Latn''',
'''Buginese''': '''bug_Latn''',
'''Bulgarian''': '''bul_Cyrl''',
'''Catalan''': '''cat_Latn''',
'''Cebuano''': '''ceb_Latn''',
'''Czech''': '''ces_Latn''',
'''Chokwe''': '''cjk_Latn''',
'''Central Kurdish''': '''ckb_Arab''',
'''Crimean Tatar''': '''crh_Latn''',
'''Welsh''': '''cym_Latn''',
'''Danish''': '''dan_Latn''',
'''German''': '''deu_Latn''',
'''Southwestern Dinka''': '''dik_Latn''',
'''Dyula''': '''dyu_Latn''',
'''Dzongkha''': '''dzo_Tibt''',
'''Greek''': '''ell_Grek''',
'''English''': '''eng_Latn''',
'''Esperanto''': '''epo_Latn''',
'''Estonian''': '''est_Latn''',
'''Basque''': '''eus_Latn''',
'''Ewe''': '''ewe_Latn''',
'''Faroese''': '''fao_Latn''',
'''Fijian''': '''fij_Latn''',
'''Finnish''': '''fin_Latn''',
'''Fon''': '''fon_Latn''',
'''French''': '''fra_Latn''',
'''Friulian''': '''fur_Latn''',
'''Nigerian Fulfulde''': '''fuv_Latn''',
'''Scottish Gaelic''': '''gla_Latn''',
'''Irish''': '''gle_Latn''',
'''Galician''': '''glg_Latn''',
'''Guarani''': '''grn_Latn''',
'''Gujarati''': '''guj_Gujr''',
'''Haitian Creole''': '''hat_Latn''',
'''Hausa''': '''hau_Latn''',
'''Hebrew''': '''heb_Hebr''',
'''Hindi''': '''hin_Deva''',
'''Chhattisgarhi''': '''hne_Deva''',
'''Croatian''': '''hrv_Latn''',
'''Hungarian''': '''hun_Latn''',
'''Armenian''': '''hye_Armn''',
'''Igbo''': '''ibo_Latn''',
'''Ilocano''': '''ilo_Latn''',
'''Indonesian''': '''ind_Latn''',
'''Icelandic''': '''isl_Latn''',
'''Italian''': '''ita_Latn''',
'''Javanese''': '''jav_Latn''',
'''Japanese''': '''jpn_Jpan''',
'''Kabyle''': '''kab_Latn''',
'''Jingpho''': '''kac_Latn''',
'''Kamba''': '''kam_Latn''',
'''Kannada''': '''kan_Knda''',
'''Kashmiri Arabic''': '''kas_Arab''',
'''Kashmiri Devanagari''': '''kas_Deva''',
'''Georgian''': '''kat_Geor''',
'''Central Kanuri Arabic''': '''knc_Arab''',
'''Central Kanuri Latin''': '''knc_Latn''',
'''Kazakh''': '''kaz_Cyrl''',
'''Kabiyè''': '''kbp_Latn''',
'''Kabuverdianu''': '''kea_Latn''',
'''Khmer''': '''khm_Khmr''',
'''Kikuyu''': '''kik_Latn''',
'''Kinyarwanda''': '''kin_Latn''',
'''Kyrgyz''': '''kir_Cyrl''',
'''Kimbundu''': '''kmb_Latn''',
'''Northern Kurdish''': '''kmr_Latn''',
'''Kikongo''': '''kon_Latn''',
'''Korean''': '''kor_Hang''',
'''Lao''': '''lao_Laoo''',
'''Ligurian''': '''lij_Latn''',
'''Limburgish''': '''lim_Latn''',
'''Lingala''': '''lin_Latn''',
'''Lithuanian''': '''lit_Latn''',
'''Lombard''': '''lmo_Latn''',
'''Latgalian''': '''ltg_Latn''',
'''Luxembourgish''': '''ltz_Latn''',
'''Luba-Kasai''': '''lua_Latn''',
'''Ganda''': '''lug_Latn''',
'''Luo''': '''luo_Latn''',
'''Mizo''': '''lus_Latn''',
'''Standard Latvian''': '''lvs_Latn''',
'''Magahi''': '''mag_Deva''',
'''Maithili''': '''mai_Deva''',
'''Malayalam''': '''mal_Mlym''',
'''Marathi''': '''mar_Deva''',
    '''Minangkabau Arabic''': '''min_Arab''',
'''Minangkabau Latin''': '''min_Latn''',
'''Macedonian''': '''mkd_Cyrl''',
'''Plateau Malagasy''': '''plt_Latn''',
'''Maltese''': '''mlt_Latn''',
'''Meitei Bengali''': '''mni_Beng''',
'''Halh Mongolian''': '''khk_Cyrl''',
'''Mossi''': '''mos_Latn''',
'''Maori''': '''mri_Latn''',
'''Burmese''': '''mya_Mymr''',
'''Dutch''': '''nld_Latn''',
'''Norwegian Nynorsk''': '''nno_Latn''',
'''Norwegian Bokmål''': '''nob_Latn''',
'''Nepali''': '''npi_Deva''',
'''Northern Sotho''': '''nso_Latn''',
'''Nuer''': '''nus_Latn''',
'''Nyanja''': '''nya_Latn''',
'''Occitan''': '''oci_Latn''',
'''West Central Oromo''': '''gaz_Latn''',
'''Odia''': '''ory_Orya''',
'''Pangasinan''': '''pag_Latn''',
'''Eastern Panjabi''': '''pan_Guru''',
'''Papiamento''': '''pap_Latn''',
'''Western Persian''': '''pes_Arab''',
'''Polish''': '''pol_Latn''',
'''Portuguese''': '''por_Latn''',
'''Dari''': '''prs_Arab''',
'''Southern Pashto''': '''pbt_Arab''',
'''Ayacucho Quechua''': '''quy_Latn''',
'''Romanian''': '''ron_Latn''',
'''Rundi''': '''run_Latn''',
'''Russian''': '''rus_Cyrl''',
'''Sango''': '''sag_Latn''',
'''Sanskrit''': '''san_Deva''',
'''Santali''': '''sat_Olck''',
'''Sicilian''': '''scn_Latn''',
'''Shan''': '''shn_Mymr''',
'''Sinhala''': '''sin_Sinh''',
'''Slovak''': '''slk_Latn''',
'''Slovenian''': '''slv_Latn''',
'''Samoan''': '''smo_Latn''',
'''Shona''': '''sna_Latn''',
'''Sindhi''': '''snd_Arab''',
'''Somali''': '''som_Latn''',
'''Southern Sotho''': '''sot_Latn''',
'''Spanish''': '''spa_Latn''',
'''Tosk Albanian''': '''als_Latn''',
'''Sardinian''': '''srd_Latn''',
'''Serbian''': '''srp_Cyrl''',
'''Swati''': '''ssw_Latn''',
'''Sundanese''': '''sun_Latn''',
'''Swedish''': '''swe_Latn''',
'''Swahili''': '''swh_Latn''',
'''Silesian''': '''szl_Latn''',
'''Tamil''': '''tam_Taml''',
'''Tatar''': '''tat_Cyrl''',
'''Telugu''': '''tel_Telu''',
'''Tajik''': '''tgk_Cyrl''',
'''Tagalog''': '''tgl_Latn''',
'''Thai''': '''tha_Thai''',
'''Tigrinya''': '''tir_Ethi''',
'''Tamasheq Latin''': '''taq_Latn''',
'''Tamasheq Tifinagh''': '''taq_Tfng''',
'''Tok Pisin''': '''tpi_Latn''',
'''Tswana''': '''tsn_Latn''',
'''Tsonga''': '''tso_Latn''',
'''Turkmen''': '''tuk_Latn''',
'''Tumbuka''': '''tum_Latn''',
'''Turkish''': '''tur_Latn''',
'''Twi''': '''twi_Latn''',
'''Central Atlas Tamazight''': '''tzm_Tfng''',
'''Uyghur''': '''uig_Arab''',
'''Ukrainian''': '''ukr_Cyrl''',
'''Umbundu''': '''umb_Latn''',
'''Urdu''': '''urd_Arab''',
'''Northern Uzbek''': '''uzn_Latn''',
'''Venetian''': '''vec_Latn''',
'''Vietnamese''': '''vie_Latn''',
'''Waray''': '''war_Latn''',
'''Wolof''': '''wol_Latn''',
'''Xhosa''': '''xho_Latn''',
'''Eastern Yiddish''': '''ydd_Hebr''',
'''Yoruba''': '''yor_Latn''',
'''Yue Chinese''': '''yue_Hant''',
'''Chinese Simplified''': '''zho_Hans''',
'''Chinese Traditional''': '''zho_Hant''',
'''Standard Malay''': '''zsm_Latn''',
'''Zulu''': '''zul_Latn''',
}
class SCREAMING_SNAKE_CASE_ ( PipelineTool ):
'''simple docstring'''
lowercase : Dict = "facebook/nllb-200-distilled-600M"
lowercase : Optional[Any] = (
"This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
"be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
"which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in "
"plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
)
lowercase : List[str] = "translator"
lowercase : List[str] = AutoTokenizer
lowercase : int = AutoModelForSeqaSeqLM
lowercase : Dict = LANGUAGE_CODES
lowercase : Dict = ["text", "text", "text"]
lowercase : List[Any] = ["text"]
    def SCREAMING_SNAKE_CASE_ ( self : str , text : List[str] , src_lang : List[Any] , tgt_lang : Optional[Any] ) -> Any:
        if src_lang not in self.lang_to_code:
            raise ValueError(f'{src_lang} is not a supported language.' )
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f'{tgt_lang} is not a supported language.' )
        src_lang =self.lang_to_code[src_lang]
        tgt_lang =self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text , return_tensors='pt' , src_lang=src_lang , tgt_lang=tgt_lang )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> int:
return self.model.generate(**SCREAMING_SNAKE_CASE__ )
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , outputs : Union[str, Any] ) -> Any:
        return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=True )
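# A hedged usage sketch (added; not part of the tool module). PipelineTool
# subclasses are callable and lazily load the checkpoint named in
# `default_checkpoint` above on first use:
#
# translator = SCREAMING_SNAKE_CASE_()
# print(translator('Bonjour, le monde !', src_lang='French', tgt_lang='English'))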
| 707 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB =get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE_ ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def SCREAMING_SNAKE_CASE_ ( self : int ) -> Optional[Any]:
super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer =XLMRobertaTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : int ) -> List[Any]:
        token ='<pad>'
        token_id =1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Any:
        vocab_keys =list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<s>' )
        self.assertEqual(vocab_keys[1] , '<pad>' )
        self.assertEqual(vocab_keys[-1] , '<mask>' )
        self.assertEqual(len(vocab_keys ) , 10_02 )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Dict:
self.assertEqual(self.get_tokenizer().vocab_size , 10_02 )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> str:
        tokenizer =XLMRobertaTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens =tokenizer.tokenize('This is a test' )
        self.assertListEqual(tokens , ['▁This', '▁is', '▁a', '▁t', 'est'] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
        tokens =tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids =tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens =tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Optional[int]:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
        self.tokenizers_list[0] =(self.rust_tokenizer_class, 'hf-internal-testing/tiny-xlm-roberta', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                tokenizer_r =self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p =self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tmpdirname2 =tempfile.mkdtemp()
                tokenizer_r_files =tokenizer_r.save_pretrained(tmpdirname2 )
                tokenizer_p_files =tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
                tokenizer_r_files =tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp =tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp =tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 =tempfile.mkdtemp()
                tokenizer_r_files =tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=True )
                tokenizer_p_files =tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp =tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp =tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 =tempfile.mkdtemp()
                tokenizer_r_files =tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=False )
                tokenizer_p_files =tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                tokenizer_rp =tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp =tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Optional[int]:
return XLMRobertaTokenizer.from_pretrained('xlm-roberta-base' )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Any:
with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB , f.name )
            tokenizer =XLMRobertaTokenizer(f.name , keep_accents=True )
            pickled_tokenizer =pickle.dumps(tokenizer )
        pickle.loads(pickled_tokenizer )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Union[str, Any]:
if not self.test_rust_tokenizer:
return
        tokenizer =self.get_tokenizer()
        rust_tokenizer =self.get_rust_tokenizer()
        sequence ='I was born in 92000, and this is falsé.'
        tokens =tokenizer.tokenize(sequence )
        rust_tokens =rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids =tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids =rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer =self.get_rust_tokenizer()
        ids =tokenizer.encode(sequence )
        rust_ids =rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> List[str]:
        symbols ='Hello World!'
        expected_encoding =[0, 3_53_78, 66_61, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(expected_encoding , self.big_tokenizer.encode(symbols ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> str:
        symbols =(
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
        expected_encoding =[
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(expected_encoding , self.big_tokenizer.encode(symbols ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Any:
# fmt: off
A : List[Any] ={'input_ids': [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE__ , model_name='xlm-roberta-base' , revision='d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3' , )
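# Clarifying note (added): the `value + tokenizer.fairseq_offset` arithmetic in
# the tests above reflects XLM-R's fairseq heritage -- fairseq places '<s>',
# '<pad>', '</s>' and '<unk>' at the front of the vocabulary, so every
# SentencePiece id is shifted by an offset of 1 (e.g. SentencePiece unk id 2
# maps to 2 + 1 = 3, matching the inline '# ^ unk: 2 + 1 = 3' comment).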
| 661 | 0 |
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE_ ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Tuple:
super().setUp()
# fmt: off
        vocab_tokens =['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
        # fmt: on
        emoji_tokens ={'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}} # 😀
        self.special_tokens_map ={'unk_token': '<unk>'}
        self.vocab_file =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.emoji_file =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['emoji_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
        with open(self.emoji_file , 'w' ) as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens ) )
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] , **kwargs : Optional[Any] ) -> Union[str, Any]:
        kwargs.update(self.special_tokens_map )
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] , tokenizer : str ) -> List[Any]:
        input_text ='こんにちは、世界。 \nこんばんは、㔺界。😀'
        output_text ='こんにちは、世界。 \nこんばんは、世界。😀'
        return input_text, output_text
    def SCREAMING_SNAKE_CASE_ ( self : List[str] , tokenizer : Tuple ) -> Optional[Any]:
        input_text , output_text =self.get_input_output_texts(tokenizer )
        ids =tokenizer.encode(output_text , add_special_tokens=False )
        text =tokenizer.decode(ids , clean_up_tokenization_spaces=False )
        return text, ids
def SCREAMING_SNAKE_CASE_ ( self : int ) -> Union[str, Any]:
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> str:
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Tuple:
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Dict:
        tokenizer =self.get_tokenizer()
        # Testing tokenization
        input_text ='こんにちは、世界。 こんばんは、㔺界。'
        expected_tokens =['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。']
        tokens =tokenizer.tokenize(input_text )
        self.assertListEqual(tokens , expected_tokens )
        # Testing conversion to ids without special tokens
        expected_ids =[0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids =tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(input_ids , expected_ids )
        # Testing conversion to ids with special tokens
        input_tokens =tokens + [tokenizer.unk_token]
        expected_ids =[0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids =tokenizer.convert_tokens_to_ids(input_tokens )
        self.assertListEqual(input_ids , expected_ids )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> str:
        tokenizer =self.get_tokenizer()
        # Testing tokenization
        input_text ='こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'
        expected_text ='こんにちは、、、、世界。こんばんは、、、、世界。'
        ids =tokenizer.encode(input_text )
        output_text =tokenizer.decode(ids )
        self.assertEqual(output_text , expected_text )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> int:
        tokenizer =self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
        # Testing tokenization
        prefix_text ='こんにちは、世界。'
        input_text ='こんばんは、㔺界。😀'
        expected_text ='こんにちは、世界。こんばんは、世界。😀'
        ids_a =tokenizer.encode(prefix_text + input_text )
        ids_b =tokenizer.encode('' , prefix_text=prefix_text + input_text )
        ids_c =tokenizer.encode(input_text , prefix_text=prefix_text )
        output_a =tokenizer.decode(ids_a )
        output_b =tokenizer.decode(ids_b )
        output_c =tokenizer.decode(ids_c )
        self.assertEqual(output_a , expected_text )
        self.assertEqual(output_b , expected_text )
        self.assertEqual(output_c , expected_text )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> List[Any]:
        tokenizer =self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
        # Testing tokenization
        prefix_text ='こんにちは、世界。'
        input_text ='こんばんは、㔺界。😀'
        len_prefix =len(tokenizer.encode(prefix_text ) ) - 2
        len_text =len(tokenizer.encode(input_text ) ) - 2
        expected_mask_a =[1] + [0] * (len_prefix + len_text + 1)
        expected_mask_b =[1] * (len_prefix + len_text + 1) + [0]
        expected_mask_c =[1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_ids_a =tokenizer(prefix_text + input_text ).token_type_ids
        type_ids_b =tokenizer('' , prefix_text=prefix_text + input_text ).token_type_ids
        type_ids_c =tokenizer(input_text , prefix_text=prefix_text ).token_type_ids
        self.assertListEqual(type_ids_a , expected_mask_a )
        self.assertListEqual(type_ids_b , expected_mask_b )
        self.assertListEqual(type_ids_c , expected_mask_c )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Optional[Any]:
        tokenizer =self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
        x_token_a =tokenizer.encode('あンいワ' )
        x_token_b =tokenizer.encode('' , prefix_text='あンいワ' )
        x_token_c =tokenizer.encode('いワ' , prefix_text='あン' )
        self.assertEqual(tokenizer.decode(x_token_a ) , tokenizer.decode(x_token_b ) )
        self.assertEqual(tokenizer.decode(x_token_a ) , tokenizer.decode(x_token_c ) )
        self.assertNotEqual(x_token_a , x_token_b )
        self.assertNotEqual(x_token_a , x_token_c )
        self.assertEqual(x_token_b[1] , x_token_a[-1] ) # SEG token
        self.assertEqual(x_token_b[1] , x_token_c[3] ) # SEG token
@slow
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Tuple:
        tokenizer =self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
        input_pairs =[['武田信玄', 'は、'], ['織田信長', 'の配下の、']]
        x_token =tokenizer(input_pairs , padding=True )
        x_token_a =tokenizer.batch_encode_plus(input_pairs , padding=True )
        # fmt: off
        input_ids_expected =[[3_59_93, 86_40, 2_59_48, 3_59_98, 3_06_47, 3_56_75, 3_59_99, 3_59_99], [3_59_93, 1_03_82, 98_68, 3_59_98, 3_06_46, 94_59, 3_06_46, 3_56_75]]
        token_type_ids_expected =[[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        attention_mask_expected =[[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids , input_ids_expected )
        self.assertListEqual(x_token.token_type_ids , token_type_ids_expected )
        self.assertListEqual(x_token.attention_mask , attention_mask_expected )
        self.assertListEqual(x_token_a.input_ids , input_ids_expected )
        self.assertListEqual(x_token_a.token_type_ids , token_type_ids_expected )
        self.assertListEqual(x_token_a.attention_mask , attention_mask_expected )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> int:
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Dict:
# tokenizer has no padding token
pass
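# Illustration (added): GPTSAN is a prefix-LM tokenizer, which is what the slow
# tests above exercise. Passing `prefix_text` shifts the segment separator and
# the token_type_ids (1 over the prefix, 0 over the text) while every variant
# decodes back to the same string:
#
# tokenizer = GPTSanJapaneseTokenizer.from_pretrained('Tanrei/GPTSAN-japanese')
# enc = tokenizer('こんばんは、㔺界。😀', prefix_text='こんにちは、世界。')
# print(enc.token_type_ids)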
| 708 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : int =logging.get_logger(__name__)
_lowercase : Dict ={
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/config.json''',
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class SCREAMING_SNAKE_CASE_ ( PretrainedConfig ):
'''simple docstring'''
lowercase : Optional[int] = "xglm"
lowercase : Any = ["past_key_values"]
lowercase : Dict = {
"num_attention_heads": "attention_heads",
"hidden_size": "d_model",
"num_hidden_layers": "num_layers",
}
    def __init__( self : int , vocab_size : List[Any]=25_60_08 , max_position_embeddings : Dict=20_48 , d_model : List[Any]=10_24 , ffn_dim : str=40_96 , num_layers : Optional[int]=24 , attention_heads : Optional[Any]=16 , activation_function : Any="gelu" , dropout : Optional[Any]=0.1 , attention_dropout : Optional[Any]=0.1 , activation_dropout : List[Any]=0.0 , layerdrop : Tuple=0.0 , init_std : List[Any]=0.0_2 , scale_embedding : int=True , use_cache : Dict=True , decoder_start_token_id : Any=2 , pad_token_id : List[Any]=1 , bos_token_id : str=0 , eos_token_id : List[str]=2 , **kwargs : Dict , ) -> int:
        self.vocab_size =vocab_size
        self.max_position_embeddings =max_position_embeddings
        self.d_model =d_model
        self.ffn_dim =ffn_dim
        self.num_layers =num_layers
        self.attention_heads =attention_heads
        self.activation_function =activation_function
        self.dropout =dropout
        self.attention_dropout =attention_dropout
        self.activation_dropout =activation_dropout
        self.layerdrop =layerdrop
        self.init_std =init_std
        self.scale_embedding =scale_embedding # scale factor will be sqrt(d_model) if True
        self.use_cache =use_cache
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
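# A minimal sketch (added for illustration). Because of `attribute_map` above,
# the generic config names alias the XGLM-specific ones:
#
# config = SCREAMING_SNAKE_CASE_(num_layers=2, d_model=1_28, ffn_dim=2_56)
# assert config.hidden_size == config.d_model == 1_28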
| 661 | 0 |
import logging
import os
from .state import PartialState
class SCREAMING_SNAKE_CASE_ ( logging.LoggerAdapter ):
'''simple docstring'''
@staticmethod
    def SCREAMING_SNAKE_CASE_ ( main_process_only : List[str] ) -> Optional[int]:
        state =PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)
    def SCREAMING_SNAKE_CASE_ ( self : List[str] , level : Tuple , msg : int , *args : Tuple , **kwargs : Union[str, Any] ) -> Union[str, Any]:
        if PartialState._shared_state == {}:
            raise RuntimeError(
                'You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.' )
        main_process_only =kwargs.pop('main_process_only' , True )
        in_order =kwargs.pop('in_order' , False )
        if self.isEnabledFor(level ):
            if self._should_log(main_process_only ):
                msg , kwargs =self.process(msg , kwargs )
                self.logger.log(level , msg , *args , **kwargs )
            elif in_order:
                state =PartialState()
                for i in range(state.num_processes ):
                    if i == state.process_index:
                        msg , kwargs =self.process(msg , kwargs )
                        self.logger.log(level , msg , *args , **kwargs )
                    state.wait_for_everyone()
def A__ ( lowercase: str, log_level: str = None ) -> Dict:
    if log_level is None:
        log_level =os.environ.get('ACCELERATE_LOG_LEVEL', None )
    logger =logging.getLogger(lowercase )
    if log_level is not None:
        logger.setLevel(log_level.upper() )
        logger.root.setLevel(log_level.upper() )
    return MultiProcessAdapter(logger, {} )
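# A minimal usage sketch (added for illustration; `A__` is this module's
# `get_logger` helper). It assumes `Accelerator()` or `PartialState()` has been
# created first, as the RuntimeError in `log` above enforces:
#
# logger = A__(__name__, log_level='INFO')
# logger.info('printed once, on the main process only')
# logger.info('printed by every rank, in order', main_process_only=False, in_order=True)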
| 709 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
_lowercase : List[str] ='''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine'''
def get_user_input( ) -> List[Any]:
    compute_environment =_ask_options(
        'In which compute environment are you running?', ['This machine', 'AWS (Amazon SageMaker)'], _convert_compute_environment, )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config =get_sagemaker_input()
    else:
        config =get_cluster_input()
    return config
def config_command_parser( subparsers: int=None ) -> str:
    if subparsers is not None:
        parser =subparsers.add_parser('config', description=_lowercase )
    else:
        parser =argparse.ArgumentParser('Accelerate config command', description=_lowercase )
    parser.add_argument(
        '--config_file', default=None, help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ), )
    if subparsers is not None:
        parser.set_defaults(func=config_command )
    return parser
def config_command( args: Tuple ) -> List[Any]:
    config =get_user_input()
    if args.config_file is not None:
        config_file =args.config_file
    else:
        if not os.path.isdir(cache_dir ):
            os.makedirs(cache_dir )
        config_file =default_yaml_config_file
    if config_file.endswith('.json' ):
        config.to_json_file(config_file )
    else:
        config.to_yaml_file(config_file )
    print(F'accelerate configuration saved at {config_file}' )
def main( ) -> Optional[int]:
    parser =config_command_parser()
    args =parser.parse_args()
    config_command(args )
if __name__ == "__main__":
main()
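# Illustrative shell usage (added; not part of the module). Run interactively,
# or point the optional flag registered above at an explicit location:
#
# $ accelerate config
# $ accelerate config --config_file ~/custom_accelerate_config.yaml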
| 661 | 0 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
TOKENIZER_CHECKPOINTS =['''bert-base-uncased''', '''bert-base-cased''']
TINY_MODEL_CHECKPOINT ='''hf-internal-testing/tiny-bert-tf-only'''
if is_tf_available():
    class ModelToSave( tf.keras.Model ):
'''simple docstring'''
        def __init__( self : Dict , tokenizer : Any ) -> List[str]:
            super().__init__()
            self.tokenizer =tokenizer
            config =AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT )
            self.bert =TFAutoModel.from_config(config )
        def call( self : Optional[Any] , inputs : Optional[int] ) -> List[Any]:
            tokenized =self.tokenizer(inputs )
            out =self.bert(**tokenized )
            return out["pooler_output"]
@require_tf
@require_tensorflow_text
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Tuple:
super().setUp()
        self.tokenizers =[
            BertTokenizer.from_pretrained(checkpoint ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ] # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers =[TFBertTokenizer.from_pretrained(checkpoint ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint , use_fast_bert_tokenizer=False )
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers ) == len(self.tf_tokenizers )
        self.test_sentences =[
            'This is a straightforward English test sentence.',
            'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
            'Now we\'re going to add some Chinese: 一 二 三 一二三',
            'And some much more rare Chinese: 齉 堃 齉堃',
            'Je vais aussi écrire en français pour tester les accents',
            'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
        ]
        self.paired_sentences =list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> List[Any]:
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs =tokenizer(test_inputs , return_tensors='tf' , padding='longest' )
                tf_outputs =tf_tokenizer(test_inputs )
                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.int64 ) == tf_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Any:
for tf_tokenizer in self.tf_tokenizers:
            merged_outputs =tf_tokenizer(self.paired_sentences )
            separated_outputs =tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.int64 ) == separated_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : int ) -> str:
for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer =tf.function(tf_tokenizer )
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs =tf.constant(test_inputs )
                compiled_outputs =compiled_tokenizer(test_inputs )
                eager_outputs =tf_tokenizer(test_inputs )
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Any:
for tf_tokenizer in self.tf_tokenizers:
            model =ModelToSave(tokenizer=tf_tokenizer )
            test_inputs =tf.convert_to_tensor(self.test_sentences )
            out =model(test_inputs ) # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path =Path(tempdir ) / 'saved.model'
                model.save(save_path )
                loaded_model =tf.keras.models.load_model(save_path )
                loaded_output =loaded_model(test_inputs )
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1e-5 )
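# A short end-to-end sketch (added; not part of the original test file). The
# point of TFBertTokenizer is that tokenization is a TF graph op, so a saved
# model can consume raw strings directly, as ModelToSave demonstrates above:
#
# tf_tokenizer = TFBertTokenizer.from_pretrained('bert-base-uncased')
# model = ModelToSave(tokenizer=tf_tokenizer)
# pooled = model(tf.constant(['Raw strings go straight into the graph.']))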
| 710 |
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS ='''src/transformers'''
# Matches is_xxx_available()
_re_backend =re.compile(R'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct =re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value =re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
_re_test_backend =re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one =re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many =re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object =re.compile(R'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets =re.compile(R'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
_re_import =re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
_re_try =re.compile(R'''^\s*try:''')
# Catches a line with else:
_re_else =re.compile(R'''^\s*else:''')
def find_backend( lowercase: Dict ) -> int:
    if _re_test_backend.search(lowercase ) is None:
        return None
    backends =[b[0] for b in _re_backend.findall(lowercase )]
    backends.sort()
    return "_and_".join(backends )
def parse_init( lowercase: Any ) -> List[Any]:
    with open(lowercase, 'r', encoding='utf-8', newline='\n' ) as f:
        lines =f.readlines()
    line_index =0
    while line_index < len(lines ) and not lines[line_index].startswith('_import_structure = {' ):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines ):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects =[]
    while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None:
        line =lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line ):
            content =_re_one_line_import_struct.search(line ).groups()[0]
            imports =re.findall(R'\[([^\]]+)\]', content )
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(', ' )] )
            line_index += 1
            continue
        single_line_import_search =_re_import_struct_key_value.search(line )
        if single_line_import_search is not None:
            imports =[obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(obj ) > 0]
            objects.extend(imports )
        elif line.startswith(' ' * 8 + '"' ):
            objects.append(line[9:-3] )
        line_index += 1
    import_dict_objects ={'none': objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith('if TYPE_CHECKING' ):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend =find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            backend =None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            objects =[]
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ):
                line =lines[line_index]
                if _re_import_struct_add_one.search(line ) is not None:
                    objects.append(_re_import_struct_add_one.search(line ).groups()[0] )
                elif _re_import_struct_add_many.search(line ) is not None:
                    imports =_re_import_struct_add_many.search(line ).groups()[0].split(', ' )
                    imports =[obj[1:-1] for obj in imports if len(obj ) > 0]
                    objects.extend(imports )
                elif _re_between_brackets.search(line ) is not None:
                    imports =_re_between_brackets.search(line ).groups()[0].split(', ' )
                    imports =[obj[1:-1] for obj in imports if len(obj ) > 0]
                    objects.extend(imports )
                elif _re_quote_object.search(line ) is not None:
                    objects.append(_re_quote_object.search(line ).groups()[0] )
                elif line.startswith(' ' * 8 + '"' ):
                    objects.append(line[9:-3] )
                elif line.startswith(' ' * 12 + '"' ):
                    objects.append(line[13:-3] )
                line_index += 1
            import_dict_objects[backend] =objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects =[]
    while (
        line_index < len(lines )
        and find_backend(lines[line_index] ) is None
        and not lines[line_index].startswith('else' )
    ):
        line =lines[line_index]
        single_line_import_search =_re_import.search(line )
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(', ' ) )
        elif line.startswith(' ' * 8 ):
            objects.append(line[8:-2] )
        line_index += 1
    type_hint_objects ={'none': objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines ):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend =find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            backend =None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            objects =[]
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ):
                line =lines[line_index]
                single_line_import_search =_re_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(', ' ) )
                elif line.startswith(' ' * 12 ):
                    objects.append(line[12:-2] )
                line_index += 1
            type_hint_objects[backend] =objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def analyze_results( import_dict_objects: Any, type_hint_objects: int ) -> Dict:
    def find_duplicates(lowercase: List[str] ):
        return [k for k, v in collections.Counter(lowercase ).items() if v > 1]
    if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
        return ["Both sides of the init do not have the same backends!"]
    errors =[]
    for key in import_dict_objects.keys():
        duplicate_imports =find_duplicates(import_dict_objects[key] )
        if duplicate_imports:
            errors.append(F'Duplicate _import_structure definitions for: {duplicate_imports}' )
        duplicate_type_hints =find_duplicates(type_hint_objects[key] )
        if duplicate_type_hints:
            errors.append(F'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}' )
        if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
            name ='base imports' if key == 'none' else F'{key} backend'
            errors.append(F'Differences for {name}:' )
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(F'  {a} in TYPE_HINT but not in _import_structure.' )
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(F'  {a} in _import_structure but not in TYPE_HINT.' )
    return errors
def check_all_inits( ) -> List[str]:
    failures =[]
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
        if "__init__.py" in files:
            fname =os.path.join(root, '__init__.py' )
            objects =parse_init(fname )
            if objects is not None:
                errors =analyze_results(*objects )
                if len(errors ) > 0:
                    errors[0] =F'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'
                    failures.append('\n'.join(errors ) )
    if len(failures ) > 0:
        raise ValueError('\n\n'.join(failures ) )
def get_transformers_submodules( ) -> int:
    submodules =[]
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS ):
        for folder in directories:
            # Ignore private modules
            if folder.startswith('_' ):
                directories.remove(folder )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path ) / folder).glob('*.py' ) ) ) == 0:
                continue
            short_path =str((Path(path ) / folder).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule =short_path.replace(os.path.sep, '.' )
            submodules.append(submodule )
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path =str((Path(path ) / fname).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule =short_path.replace('.py', '' ).replace(os.path.sep, '.' )
            if len(submodule.split('.' ) ) == 1:
                submodules.append(submodule )
    return submodules
IGNORE_SUBMODULES =[
    '''convert_pytorch_checkpoint_to_tf2''',
    '''modeling_flax_pytorch_utils''',
]
def check_submodules( ) -> Tuple:
    # This is to make sure the transformers module imported is the one in the repo.
    spec =importlib.util.spec_from_file_location(
        'transformers', os.path.join(PATH_TO_TRANSFORMERS, '__init__.py' ), submodule_search_locations=[PATH_TO_TRANSFORMERS], )
    transformers =spec.loader.load_module()
    module_not_registered =[
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered ) > 0:
        list_of_modules ='\n'.join(F'- {module}' for module in module_not_registered )
        raise ValueError(
            'The following submodules are not properly registered in the main init of Transformers:\n'
            F'{list_of_modules}\n'
            'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 661 | 0 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : List[str] =logging.get_logger(__name__)
_lowercase : int ={
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE_ ( PretrainedConfig ):
'''simple docstring'''
lowercase : str = "mvp"
lowercase : Optional[Any] = ["past_key_values"]
lowercase : Any = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__( self : Tuple , vocab_size : Tuple=5_02_67 , max_position_embeddings : Union[str, Any]=10_24 , encoder_layers : Optional[Any]=12 , encoder_ffn_dim : Tuple=40_96 , encoder_attention_heads : List[Any]=16 , decoder_layers : List[Any]=12 , decoder_ffn_dim : int=40_96 , decoder_attention_heads : Tuple=16 , encoder_layerdrop : List[Any]=0.0 , decoder_layerdrop : Tuple=0.0 , activation_function : Dict="gelu" , d_model : List[Any]=10_24 , dropout : Any=0.1 , attention_dropout : int=0.0 , activation_dropout : str=0.0 , init_std : Dict=0.0_2 , classifier_dropout : str=0.0 , scale_embedding : Any=False , use_cache : List[str]=True , pad_token_id : Optional[int]=1 , bos_token_id : Dict=0 , eos_token_id : Tuple=2 , is_encoder_decoder : List[Any]=True , decoder_start_token_id : Tuple=2 , forced_eos_token_id : List[str]=2 , use_prompt : Optional[Any]=False , prompt_length : List[Any]=1_00 , prompt_mid_dim : Any=8_00 , **kwargs : Any , ) -> Optional[Any]:
        self.vocab_size =vocab_size
        self.max_position_embeddings =max_position_embeddings
        self.d_model =d_model
        self.encoder_ffn_dim =encoder_ffn_dim
        self.encoder_layers =encoder_layers
        self.encoder_attention_heads =encoder_attention_heads
        self.decoder_ffn_dim =decoder_ffn_dim
        self.decoder_layers =decoder_layers
        self.decoder_attention_heads =decoder_attention_heads
        self.dropout =dropout
        self.attention_dropout =attention_dropout
        self.activation_dropout =activation_dropout
        self.activation_function =activation_function
        self.init_std =init_std
        self.encoder_layerdrop =encoder_layerdrop
        self.decoder_layerdrop =decoder_layerdrop
        self.classifier_dropout =classifier_dropout
        self.use_cache =use_cache
        self.num_hidden_layers =encoder_layers
        self.scale_embedding =scale_embedding # scale factor will be sqrt(d_model) if True
        self.use_prompt =use_prompt
        self.prompt_length =prompt_length
        self.prompt_mid_dim =prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
        if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , False ):
            self.forced_bos_token_id =self.bos_token_id
warnings.warn(
f'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
'The config can simply be saved and uploaded again to be fixed.' )
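# A small sketch (added) of the MVP-specific knobs: with `use_prompt=True` the
# model adds lightweight trainable prompts of length `prompt_length`, projected
# through a bottleneck of width `prompt_mid_dim`:
#
# config = SCREAMING_SNAKE_CASE_(use_prompt=True, prompt_length=1_00, prompt_mid_dim=8_00)
# assert config.use_prompt and config.prompt_length == 1_00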
| 711 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
_lowercase : Any =logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE_ ( TrainingArguments ):
    '''simple docstring'''
    label_smoothing : Optional[float] = field(
        default=0.0 , metadata={"help": "The label smoothing epsilon to apply (if not zero)."} )
    sortish_sampler : bool = field(default=False , metadata={"help": "Whether to use SortishSampler or not."} )
    predict_with_generate : bool = field(
        default=False , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
    adafactor : bool = field(default=False , metadata={"help": "whether to use adafactor"} )
    encoder_layerdrop : Optional[float] = field(
        default=None , metadata={"help": "Encoder layer dropout probability. Goes into model.config."} )
    decoder_layerdrop : Optional[float] = field(
        default=None , metadata={"help": "Decoder layer dropout probability. Goes into model.config."} )
    dropout : Optional[float] = field(default=None , metadata={"help": "Dropout probability. Goes into model.config."} )
    attention_dropout : Optional[float] = field(
        default=None , metadata={"help": "Attention dropout probability. Goes into model.config."} )
    lr_scheduler : Optional[str] = field(
        default="linear" , metadata={"help": f'Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'} , )
| 661 | 0 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_lowercase : Any ='''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_lowercase : Any ='''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The lengths of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
_lowercase : int ='''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    ignore_punct (boolean): If `True`, removes punctuation before scoring (mapped to sacrebleu's `no_punct`). Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE_ ( datasets.Metric ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( self : int ) -> str:
if version.parse(scb.__version__ ) < version.parse('1.4.12' ):
raise ImportWarning(
'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
'You can install it with `pip install "sacrebleu>=1.4.12"`.' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='http://www.cs.umd.edu/~snover/tercom/' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#ter'] , reference_urls=[
'https://github.com/jhclark/tercom',
] , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , ) -> Any:
A : Any =len(references[0] )
if any(len(SCREAMING_SNAKE_CASE__ ) != references_per_prediction for refs in references ):
raise ValueError('Sacrebleu requires the same number of references for each prediction' )
A : Union[str, Any] =[[refs[i] for refs in references] for i in range(SCREAMING_SNAKE_CASE__ )]
A : List[Any] =TER(
normalized=SCREAMING_SNAKE_CASE__ , no_punct=SCREAMING_SNAKE_CASE__ , asian_support=SCREAMING_SNAKE_CASE__ , case_sensitive=SCREAMING_SNAKE_CASE__ , )
A : str =sb_ter.corpus_score(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 712 |
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
_lowercase : int =2
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
def __init__( self : List[Any] , *, # begin keyword-only arguments
SCREAMING_SNAKE_CASE__ : List[Any]="<s>" , SCREAMING_SNAKE_CASE__ : Optional[int]="<pad>" , SCREAMING_SNAKE_CASE__ : List[str]="</s>" , SCREAMING_SNAKE_CASE__ : Optional[Any]="<unk>" , SCREAMING_SNAKE_CASE__ : int=None , ) -> List[Any]:
A , A , A , A : Optional[Any] =bos, unk, pad, eos
A : Dict =[]
A : Union[str, Any] =[]
A : Any ={}
A : int =self.add_symbol(SCREAMING_SNAKE_CASE__ )
A : Any =self.add_symbol(SCREAMING_SNAKE_CASE__ )
A : List[Any] =self.add_symbol(SCREAMING_SNAKE_CASE__ )
A : List[str] =self.add_symbol(SCREAMING_SNAKE_CASE__ )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(SCREAMING_SNAKE_CASE__ )
A : List[str] =len(self.symbols )
def __eq__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> str:
return self.indices == other.indices
def __getitem__( self : int , SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[Any]:
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self : List[Any] ) -> Union[str, Any]:
return len(self.symbols )
def __contains__( self : Dict , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Tuple:
return sym in self.indices
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , SCREAMING_SNAKE_CASE__ : int ) -> Any:
A : Union[str, Any] =cls()
d.add_from_file(SCREAMING_SNAKE_CASE__ )
return d
def SCREAMING_SNAKE_CASE_ ( self : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any=1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=False ) -> Any:
if word in self.indices and not overwrite:
A : int =self.indices[word]
A : Union[str, Any] =self.count[idx] + n
return idx
else:
A : Tuple =len(self.symbols )
A : str =idx
self.symbols.append(SCREAMING_SNAKE_CASE__ )
self.count.append(SCREAMING_SNAKE_CASE__ )
return idx
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[Any]:
return 0
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : List[str] ) -> Optional[Any]:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
try:
with open(SCREAMING_SNAKE_CASE__ , 'r' , encoding='utf-8' ) as fd:
self.add_from_file(SCREAMING_SNAKE_CASE__ )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(SCREAMING_SNAKE_CASE__ ) )
return
A : str =f.readlines()
A : int =self._load_meta(SCREAMING_SNAKE_CASE__ )
for line in lines[indices_start_line:]:
try:
A , A : Optional[int] =line.rstrip().rsplit(' ' , 1 )
if field == "#fairseq:overwrite":
A : int =True
A , A : Optional[Any] =line.rsplit(' ' , 1 )
else:
A : Any =False
A : Tuple =int(SCREAMING_SNAKE_CASE__ )
A : Optional[int] =line
if word in self and not overwrite:
raise RuntimeError(
'Duplicate word found when loading Dictionary: \'{}\'. '
'Duplicate words can overwrite earlier ones by adding the '
'#fairseq:overwrite flag at the end of the corresponding row '
'in the dictionary file. If using the Camembert model, please '
'download an updated copy of the model file.'.format(SCREAMING_SNAKE_CASE__ ) )
self.add_symbol(SCREAMING_SNAKE_CASE__ , n=SCREAMING_SNAKE_CASE__ , overwrite=SCREAMING_SNAKE_CASE__ )
except ValueError:
raise ValueError('Incorrect dictionary format, expected \'<token> <cnt> [flags]\'' )
def A__ ( lowercase: Union[str, Any] ) -> str:
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
A : int =dict((re.sub(r'@@$', '', lowercase ), v) if k.endswith('@@' ) else (re.sub(r'$', '</w>', lowercase ), v) for k, v in d.items() )
A : int ='<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[F'{k}</w>']
A : List[Any] =d[k] # restore
return da
def A__ ( lowercase: Optional[int], lowercase: Optional[Any] ) -> str:
# prep
if not os.path.exists(lowercase ):
raise ValueError(F'path {biogpt_checkpoint_path} does not exist!' )
os.makedirs(lowercase, exist_ok=lowercase )
print(F'Writing results to {pytorch_dump_folder_path}' )
# handle various types of models
A : List[str] =os.path.join(lowercase, 'checkpoint.pt' )
if not os.path.isfile(lowercase ):
raise ValueError(F'path to the file {checkpoint_file} does not exist!' )
A : Optional[Any] =torch.load(lowercase, map_location='cpu' )
A : Any =chkpt['cfg']['model']
# dicts
A : Any =os.path.join(lowercase, 'dict.txt' )
if not os.path.isfile(lowercase ):
raise ValueError(F'path to the file {dict_file} does not exist!' )
A : Dict =Dictionary.load(lowercase )
A : Optional[Any] =rewrite_dict_keys(src_dict.indices )
A : Tuple =len(lowercase )
A : Any =os.path.join(lowercase, VOCAB_FILES_NAMES['vocab_file'] )
print(F'Generating {src_vocab_file} of {src_vocab_size} records' )
with open(lowercase, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(lowercase, ensure_ascii=lowercase, indent=lowercase ) )
# merges_file (bpecodes)
A : List[str] =os.path.join(lowercase, 'bpecodes' )
if not os.path.isfile(lowercase ):
raise ValueError(F'path to the file {bpecodes_file} does not exist!' )
A : List[str] =os.path.join(lowercase, VOCAB_FILES_NAMES['merges_file'] )
shutil.copyfile(lowercase, lowercase )
# model config
A : Tuple =os.path.join(lowercase, 'config.json' )
A : Tuple ={
'activation_dropout': args['activation_dropout'],
'architectures': ['BioGptForCausalLM'],
'attention_probs_dropout_prob': args['attention_dropout'],
'bos_token_id': 0,
'eos_token_id': 2,
'hidden_act': args['activation_fn'],
'hidden_dropout_prob': args['dropout'],
'hidden_size': args['decoder_embed_dim'],
'initializer_range': 0.02,
'intermediate_size': args['decoder_ffn_embed_dim'],
'layer_norm_eps': 1e-1_2,
'layerdrop': args['decoder_layerdrop'],
'max_position_embeddings': args['max_target_positions'],
'model_type': 'biogpt',
'num_attention_heads': args['decoder_attention_heads'],
'num_hidden_layers': args['decoder_layers'],
'pad_token_id': 1,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_decoder_input_output_embed'],
'vocab_size': src_vocab_size,
}
# good hparam defaults to start with
print(F'Generating {biogpt_model_config_file}' )
with open(lowercase, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(lowercase, ensure_ascii=lowercase, indent=lowercase ) )
# tokenizer config
A : int =os.path.join(lowercase, lowercase )
A : List[str] ={
'bos_token': '<s>',
'eos_token': '</s>',
'model_max_length': 1_024,
'pad_token': '<pad>',
'special_tokens_map_file': None,
'tokenizer_class': 'BioGptTokenizer',
'unk_token': '<unk>',
}
print(F'Generating {biogpt_tokenizer_config_file}' )
with open(lowercase, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(lowercase, ensure_ascii=lowercase, indent=lowercase ) )
# model
A : List[Any] =chkpt['model']
# remove unneeded keys
A : List[Any] =[
'decoder.version',
]
for k in ignore_keys:
model_state_dict.pop(lowercase, lowercase )
A : str =list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith('output_projection.weight' ):
A : Union[str, Any] =model_state_dict.pop(lowercase )
else:
A : List[str] =model_state_dict.pop(lowercase )
A : Any =BioGptConfig.from_pretrained(lowercase )
A : str =BioGptForCausalLM(lowercase )
# check that it loads ok
model_new.load_state_dict(lowercase )
# save
A : Tuple =os.path.join(lowercase, lowercase )
print(F'Generating {pytorch_weights_dump_path}' )
torch.save(lowercase, lowercase )
print('Conversion is done!' )
if __name__ == "__main__":
_lowercase : Union[str, Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--biogpt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_lowercase : List[Any] =parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
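# Usage sketch (not part of the original file): what the dict-rewriting step
# does, on the toy vocabulary from the comment above. BPE continuation pieces
# ("@@") lose the marker, word-final pieces gain "</w>", and the four special
# tokens are restored verbatim afterwards:
_toy = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le@@": 5, "tt@@": 6, "er": 7}
_rw = {(re.sub(r"@@$", "", k) if k.endswith("@@") else k + "</w>"): v for k, v in _toy.items()}
for _sp in "<s> <pad> </s> <unk>".split():
    del _rw[f"{_sp}</w>"]
    _rw[_sp] = _toy[_sp]  # restore the special token spelling
print(_rw)  # {'le': 5, 'tt': 6, 'er</w>': 7, '<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}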
| 661 | 0 |
from __future__ import annotations
_lowercase : int =tuple[int, int, int]
_lowercase : Any =tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
_lowercase : List[Any] ='''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
# -------------------------- default selection --------------------------
# rotors --------------------------
_lowercase : Dict ='''EGZWVONAHDCLFQMSIPJBYUKXTR'''
_lowercase : Tuple ='''FOBHMDKEXQNRAULPGSJVTYICZW'''
_lowercase : int ='''ZJXESIUQLHAVRMDOYGTNFWPBKC'''
# reflector --------------------------
_lowercase : int ={
'''A''': '''N''',
'''N''': '''A''',
'''B''': '''O''',
'''O''': '''B''',
'''C''': '''P''',
'''P''': '''C''',
'''D''': '''Q''',
'''Q''': '''D''',
'''E''': '''R''',
'''R''': '''E''',
'''F''': '''S''',
'''S''': '''F''',
'''G''': '''T''',
'''T''': '''G''',
'''H''': '''U''',
'''U''': '''H''',
'''I''': '''V''',
'''V''': '''I''',
'''J''': '''W''',
'''W''': '''J''',
'''K''': '''X''',
'''X''': '''K''',
'''L''': '''Y''',
'''Y''': '''L''',
'''M''': '''Z''',
'''Z''': '''M''',
}
# -------------------------- extra rotors --------------------------
_lowercase : List[str] ='''RMDJXFUWGISLHVTCQNKYPBEZOA'''
_lowercase : Union[str, Any] ='''SGLCPQWZHKXAREONTFBVIYJUDM'''
_lowercase : Dict ='''HVSICLTYKQUBXDWAJZOMFGPREN'''
_lowercase : str ='''RZWQHFMVDBKICJLNTUXAGYPSOE'''
_lowercase : List[str] ='''LFKIJODBEGAMQPXVUHYSTCZRWN'''
_lowercase : int ='''KOAEGVDHXPQZMLFTYWJNBRCIUS'''
def A__ ( lowercase: RotorPositionT, lowercase: RotorSelectionT, lowercase: str ) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
# Checks if there are 3 unique rotors
if (unique_rotsel := len(set(lowercase ) )) < 3:
A : Any =F'Please use 3 unique rotors (not {unique_rotsel})'
raise Exception(lowercase )
# Checks if rotor positions are valid
A : Optional[int] =rotpos
if not 0 < rotorposa <= len(lowercase ):
A : Union[str, Any] =F'First rotor position is not within range of 1..26 ({rotorposa}'
raise ValueError(lowercase )
if not 0 < rotorposa <= len(lowercase ):
A : Optional[int] =F'Second rotor position is not within range of 1..26 ({rotorposa})'
raise ValueError(lowercase )
if not 0 < rotorposa <= len(lowercase ):
A : Dict =F'Third rotor position is not within range of 1..26 ({rotorposa})'
raise ValueError(lowercase )
# Validates string and returns dict
A : Any =_plugboard(lowercase )
return rotpos, rotsel, pbdict
def A__ ( lowercase: str ) -> dict[str, str]:
    # Validates the plugboard string: it must a) be a string and b) have an
    # even length (so letter pairs can be formed).
if not isinstance(lowercase, lowercase ):
A : List[str] =F'Plugboard setting isn\'t type string ({type(lowercase )})'
raise TypeError(lowercase )
elif len(lowercase ) % 2 != 0:
A : List[Any] =F'Odd number of symbols ({len(lowercase )})'
raise Exception(lowercase )
elif pbstring == "":
return {}
pbstring.replace(' ', '' )
# Checks if all characters are unique
A : Dict =set()
for i in pbstring:
if i not in abc:
A : Union[str, Any] =F'\'{i}\' not in list of symbols'
raise Exception(lowercase )
elif i in tmppbl:
A : Dict =F'Duplicate symbol ({i})'
raise Exception(lowercase )
else:
tmppbl.add(lowercase )
del tmppbl
# Created the dictionary
A : Dict ={}
for j in range(0, len(lowercase ) - 1, 2 ):
A : str =pbstring[j + 1]
A : Union[str, Any] =pbstring[j]
return pb
def A__ ( lowercase: str, lowercase: RotorPositionT, lowercase: RotorSelectionT = (rotora, rotora, rotora), lowercase: str = "", ) -> str:
A : Optional[Any] =text.upper()
A : Union[str, Any] =_validator(
lowercase, lowercase, plugb.upper() )
A : Any =rotor_position
A : Dict =rotor_selection
rotorposa -= 1
rotorposa -= 1
rotorposa -= 1
A : Union[str, Any] =[]
# encryption/decryption process --------------------------
for symbol in text:
if symbol in abc:
# 1st plugboard --------------------------
if symbol in plugboard:
A : List[Any] =plugboard[symbol]
# rotor ra --------------------------
A : Optional[Any] =abc.index(lowercase ) + rotorposa
A : List[str] =rotora[index % len(lowercase )]
# rotor rb --------------------------
A : Tuple =abc.index(lowercase ) + rotorposa
A : Tuple =rotora[index % len(lowercase )]
# rotor rc --------------------------
A : Optional[Any] =abc.index(lowercase ) + rotorposa
A : Union[str, Any] =rotora[index % len(lowercase )]
# reflector --------------------------
# this is the reason you don't need another machine to decipher
A : int =reflector[symbol]
# 2nd rotors
A : Optional[int] =abc[rotora.index(lowercase ) - rotorposa]
A : Optional[Any] =abc[rotora.index(lowercase ) - rotorposa]
A : Union[str, Any] =abc[rotora.index(lowercase ) - rotorposa]
# 2nd plugboard
if symbol in plugboard:
A : Union[str, Any] =plugboard[symbol]
# moves/resets rotor positions
rotorposa += 1
if rotorposa >= len(lowercase ):
A : Optional[Any] =0
rotorposa += 1
if rotorposa >= len(lowercase ):
A : Union[str, Any] =0
rotorposa += 1
if rotorposa >= len(lowercase ):
A : Any =0
# else:
# pass
# Error could be also raised
# raise ValueError(
# 'Invalid symbol('+repr(symbol)+')')
result.append(lowercase )
return "".join(lowercase )
if __name__ == "__main__":
_lowercase : List[str] ='''This is my Python script that emulates the Enigma machine from WWII.'''
_lowercase : Optional[int] =(1, 1, 1)
_lowercase : Optional[int] ='''pictures'''
_lowercase : int =(rotora, rotora, rotora)
_lowercase : Any =enigma(message, rotor_pos, rotor_sel, pb)
print('''Encrypted message:''', en)
print('''Decrypted message:''', enigma(en, rotor_pos, rotor_sel, pb))
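# Usage sketch (not part of the original file): the plugboard string pairs
# adjacent letters symmetrically, so the resulting mapping is its own inverse
# (an involution, like the reflector). For pb = "pictures":
_pb = {}
for _j in range(0, len("PICTURES") - 1, 2):
    _a, _b = "PICTURES"[_j], "PICTURES"[_j + 1]
    _pb[_a], _pb[_b] = _b, _a
assert all(_pb[_pb[_c]] == _c for _c in _pb)  # P<->I, C<->T, U<->R, E<->S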
| 713 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
_lowercase : str =False
_lowercase : Optional[Any] =False
def A__ ( lowercase: Namespace ) -> Optional[int]:
return TrainCommand(lowercase )
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
@staticmethod
def SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ : ArgumentParser ) -> Dict:
A : Optional[Any] =parser.add_parser('train' , help='CLI tool to train a model on a task.' )
train_parser.add_argument(
'--train_data' , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' , )
train_parser.add_argument(
'--column_label' , type=SCREAMING_SNAKE_CASE__ , default=0 , help='Column of the dataset csv file with example labels.' )
train_parser.add_argument(
'--column_text' , type=SCREAMING_SNAKE_CASE__ , default=1 , help='Column of the dataset csv file with example texts.' )
train_parser.add_argument(
'--column_id' , type=SCREAMING_SNAKE_CASE__ , default=2 , help='Column of the dataset csv file with example ids.' )
train_parser.add_argument(
'--skip_first_row' , action='store_true' , help='Skip the first row of the csv file (headers).' )
train_parser.add_argument('--validation_data' , type=SCREAMING_SNAKE_CASE__ , default='' , help='path to validation dataset.' )
train_parser.add_argument(
'--validation_split' , type=SCREAMING_SNAKE_CASE__ , default=0.1 , help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' , )
train_parser.add_argument('--output' , type=SCREAMING_SNAKE_CASE__ , default='./' , help='path to saved the trained model.' )
train_parser.add_argument(
'--task' , type=SCREAMING_SNAKE_CASE__ , default='text_classification' , help='Task to train the model on.' )
train_parser.add_argument(
'--model' , type=SCREAMING_SNAKE_CASE__ , default='bert-base-uncased' , help='Model\'s name or path to stored model.' )
train_parser.add_argument('--train_batch_size' , type=SCREAMING_SNAKE_CASE__ , default=32 , help='Batch size for training.' )
train_parser.add_argument('--valid_batch_size' , type=SCREAMING_SNAKE_CASE__ , default=64 , help='Batch size for validation.' )
train_parser.add_argument('--learning_rate' , type=SCREAMING_SNAKE_CASE__ , default=3e-5 , help='Learning rate.' )
train_parser.add_argument('--adam_epsilon' , type=SCREAMING_SNAKE_CASE__ , default=1e-08 , help='Epsilon for Adam optimizer.' )
train_parser.set_defaults(func=SCREAMING_SNAKE_CASE__ )
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Namespace ) -> List[Any]:
A : Optional[int] =logging.get_logger('transformers-cli/training' )
A : Dict ='tf' if is_tf_available() else 'torch'
os.makedirs(args.output , exist_ok=SCREAMING_SNAKE_CASE__ )
A : Optional[Any] =args.output
A : List[str] =args.column_label
A : int =args.column_text
A : Union[str, Any] =args.column_id
self.logger.info(f'Loading {args.task} pipeline for {args.model}' )
if args.task == "text_classification":
A : Optional[Any] =TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f'Loading dataset from {args.train_data}' )
A : Tuple =Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
A : Dict =None
if args.validation_data:
self.logger.info(f'Loading validation dataset from {args.validation_data}' )
A : List[Any] =Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
A : Optional[Any] =args.validation_split
A : str =args.train_batch_size
A : Any =args.valid_batch_size
A : Dict =args.learning_rate
A : List[str] =args.adam_epsilon
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Union[str, Any]:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def SCREAMING_SNAKE_CASE_ ( self : int ) -> List[str]:
raise NotImplementedError
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> str:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
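# Usage sketch (not part of the original file): `register_subcommand` above
# wires the command into the CLI via `set_defaults(func=...)`. The dispatch
# pattern, reduced to plain argparse with a stand-in factory:
_p = ArgumentParser("transformers-cli (sketch)")
_sub = _p.add_subparsers()
_train = _sub.add_parser("train")
_train.add_argument("--train_data", required=True)
_train.set_defaults(func=lambda a: print("would train on", a.train_data))  # stand-in
_a = _p.parse_args(["train", "--train_data", "data.csv"])
_a.func(_a)  # -> would train on data.csv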
| 661 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
_lowercase : Tuple ={'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_lowercase : List[str] ={
'''vocab_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'''
),
'''google/electra-base-generator''': '''https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt''',
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'''
),
'''google/electra-base-generator''': (
'''https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'''
),
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'''
),
},
}
_lowercase : int ={
'''google/electra-small-generator''': 5_1_2,
'''google/electra-base-generator''': 5_1_2,
'''google/electra-large-generator''': 5_1_2,
'''google/electra-small-discriminator''': 5_1_2,
'''google/electra-base-discriminator''': 5_1_2,
'''google/electra-large-discriminator''': 5_1_2,
}
_lowercase : Tuple ={
'''google/electra-small-generator''': {'''do_lower_case''': True},
'''google/electra-base-generator''': {'''do_lower_case''': True},
'''google/electra-large-generator''': {'''do_lower_case''': True},
'''google/electra-small-discriminator''': {'''do_lower_case''': True},
'''google/electra-base-discriminator''': {'''do_lower_case''': True},
'''google/electra-large-discriminator''': {'''do_lower_case''': True},
}
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
lowercase : Tuple = VOCAB_FILES_NAMES
lowercase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase : Dict = PRETRAINED_INIT_CONFIGURATION
lowercase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[Any] = ElectraTokenizer
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]="[UNK]" , SCREAMING_SNAKE_CASE__ : str="[SEP]" , SCREAMING_SNAKE_CASE__ : int="[PAD]" , SCREAMING_SNAKE_CASE__ : Dict="[CLS]" , SCREAMING_SNAKE_CASE__ : str="[MASK]" , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Any=None , **SCREAMING_SNAKE_CASE__ : List[str] , ) -> Tuple:
super().__init__(
SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
A : Optional[Any] =json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , SCREAMING_SNAKE_CASE__ ) != do_lower_case
or normalizer_state.get('strip_accents' , SCREAMING_SNAKE_CASE__ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , SCREAMING_SNAKE_CASE__ ) != tokenize_chinese_chars
):
A : Union[str, Any] =getattr(SCREAMING_SNAKE_CASE__ , normalizer_state.pop('type' ) )
A : Any =do_lower_case
A : Any =strip_accents
A : int =tokenize_chinese_chars
A : Tuple =normalizer_class(**SCREAMING_SNAKE_CASE__ )
A : Tuple =do_lower_case
def SCREAMING_SNAKE_CASE_ ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any=None ) -> Tuple:
A : int =[self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ) -> List[int]:
A : List[str] =[self.sep_token_id]
A : int =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ) -> Tuple[str]:
A : Dict =self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ )
return tuple(SCREAMING_SNAKE_CASE__ )
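# Usage sketch (not part of the original file): the segment-id rule from
# `create_token_type_ids_from_sequences`, written out with illustrative
# BERT-style ids. Everything through the first [SEP] is segment 0; the second
# sequence plus its [SEP] is segment 1:
_cls, _sep = [101], [102]  # illustrative [CLS]/[SEP] ids
_ids_a, _ids_b = [7, 8, 9], [11, 12]
_type_ids = len(_cls + _ids_a + _sep) * [0] + len(_ids_b + _sep) * [1]
assert _type_ids == [0] * 5 + [1] * 3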
| 714 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple=7 , SCREAMING_SNAKE_CASE__ : Dict=3 , SCREAMING_SNAKE_CASE__ : Tuple=30 , SCREAMING_SNAKE_CASE__ : int=4_00 , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : List[Any]=True , SCREAMING_SNAKE_CASE__ : Dict=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE__ : str=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Any=1 / 2_55 , SCREAMING_SNAKE_CASE__ : int=True , ) -> Optional[int]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
A : Optional[Any] =size if size is not None else {'shortest_edge': 18, 'longest_edge': 13_33}
A : Union[str, Any] =parent
A : Union[str, Any] =batch_size
A : Union[str, Any] =num_channels
A : int =min_resolution
A : List[Any] =max_resolution
A : Dict =do_resize
A : Tuple =size
A : List[str] =do_normalize
A : List[Any] =image_mean
A : Dict =image_std
A : Any =do_rescale
A : List[str] =rescale_factor
A : Optional[Any] =do_pad
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> List[Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict=False ) -> Dict:
if not batched:
A : Any =image_inputs[0]
if isinstance(SCREAMING_SNAKE_CASE__ , Image.Image ):
A , A : Union[str, Any] =image.size
else:
A , A : Tuple =image.shape[1], image.shape[2]
if w < h:
A : Any =int(self.size['shortest_edge'] * h / w )
A : Any =self.size['shortest_edge']
elif w > h:
A : Dict =self.size['shortest_edge']
A : Dict =int(self.size['shortest_edge'] * w / h )
else:
A : List[str] =self.size['shortest_edge']
A : Dict =self.size['shortest_edge']
else:
A : List[Any] =[]
for image in image_inputs:
A , A : int =self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
A : str =max(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : item[0] )[0]
A : Tuple =max(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
lowercase : List[Any] = ConditionalDetrImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Tuple:
A : str =ConditionalDetrImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> int:
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Tuple:
A : Tuple =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'image_mean' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'image_std' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'do_normalize' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'do_resize' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'size' ) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> int:
A : int =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 13_33} )
self.assertEqual(image_processor.do_pad , SCREAMING_SNAKE_CASE__ )
A : str =self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=SCREAMING_SNAKE_CASE__ )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Optional[int]:
pass
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Union[str, Any]:
# Initialize image_processing
A : Union[str, Any] =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A : Tuple =prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
# Test not batched input
A : List[Any] =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
A , A : List[str] =self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A , A : Union[str, Any] =self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ )
A : Union[str, Any] =image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> int:
# Initialize image_processing
A : Optional[Any] =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A : str =prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , numpify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , np.ndarray )
# Test not batched input
A : Tuple =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
A , A : Any =self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A : Tuple =image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values
A , A : Optional[int] =self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> List[str]:
# Initialize image_processing
A : Optional[Any] =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A : Any =prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , torchify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , torch.Tensor )
# Test not batched input
A : Optional[int] =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
A , A : Tuple =self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A : Tuple =image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values
A , A : int =self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Union[str, Any]:
# prepare image and target
A : Union[str, Any] =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
A : List[Any] =json.loads(f.read() )
A : Any ={'image_id': 3_97_69, 'annotations': target}
# encode them
A : str =ConditionalDetrImageProcessor.from_pretrained('microsoft/conditional-detr-resnet-50' )
A : Any =image_processing(images=SCREAMING_SNAKE_CASE__ , annotations=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
# verify pixel values
A : Optional[Any] =torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , SCREAMING_SNAKE_CASE__ )
A : List[str] =torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
# verify area
A : Dict =torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , SCREAMING_SNAKE_CASE__ ) )
# verify boxes
A : str =torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , SCREAMING_SNAKE_CASE__ )
A : Union[str, Any] =torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , SCREAMING_SNAKE_CASE__ , atol=1e-3 ) )
# verify image_id
A : Dict =torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , SCREAMING_SNAKE_CASE__ ) )
# verify is_crowd
A : List[str] =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , SCREAMING_SNAKE_CASE__ ) )
# verify class_labels
A : Union[str, Any] =torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , SCREAMING_SNAKE_CASE__ ) )
# verify orig_size
A : List[Any] =torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , SCREAMING_SNAKE_CASE__ ) )
# verify size
A : int =torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , SCREAMING_SNAKE_CASE__ ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Tuple:
# prepare image, target and masks_path
A : List[str] =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
A : Optional[int] =json.loads(f.read() )
A : int ={'file_name': '000000039769.png', 'image_id': 3_97_69, 'segments_info': target}
A : Optional[Any] =pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
A : List[Any] =ConditionalDetrImageProcessor(format='coco_panoptic' )
A : Union[str, Any] =image_processing(images=SCREAMING_SNAKE_CASE__ , annotations=SCREAMING_SNAKE_CASE__ , masks_path=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
# verify pixel values
A : Dict =torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , SCREAMING_SNAKE_CASE__ )
A : Dict =torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
# verify area
A : Optional[int] =torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , SCREAMING_SNAKE_CASE__ ) )
# verify boxes
A : List[Any] =torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , SCREAMING_SNAKE_CASE__ )
A : Any =torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , SCREAMING_SNAKE_CASE__ , atol=1e-3 ) )
# verify image_id
A : List[Any] =torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , SCREAMING_SNAKE_CASE__ ) )
# verify is_crowd
A : Any =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , SCREAMING_SNAKE_CASE__ ) )
# verify class_labels
A : str =torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , SCREAMING_SNAKE_CASE__ ) )
# verify masks
A : int =82_28_73
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , SCREAMING_SNAKE_CASE__ )
# verify orig_size
A : Any =torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , SCREAMING_SNAKE_CASE__ ) )
# verify size
A : str =torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , SCREAMING_SNAKE_CASE__ ) )
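# Usage sketch (not part of the original file): the expected-size rule the
# tests encode in `get_expected_values`, as a standalone helper (the function
# name here is hypothetical). The shorter side is scaled to `shortest_edge`
# and the longer side keeps the aspect ratio:
def _shortest_edge_resize(h, w, shortest_edge=18):
    if w < h:
        return int(shortest_edge * h / w), shortest_edge
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge

assert _shortest_edge_resize(400, 30) == (240, 18)  # 18 * 400 / 30 = 240
assert _shortest_edge_resize(30, 400) == (18, 240)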
| 661 | 0 |
def A__ ( lowercase: int ) -> bool:
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
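# Usage sketch (not part of the original file): `number & 1` isolates the
# lowest bit, so it is 0 exactly for even numbers; in Python this agrees with
# the modulo test even for negatives:
assert all((n & 1 == 0) == (n % 2 == 0) for n in range(-4, 5))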
| 715 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_lowercase : List[Any] =1_6
_lowercase : Union[str, Any] =3_2
def A__ ( lowercase: Accelerator, lowercase: int = 16, lowercase: str = "bert-base-cased" ) -> Optional[int]:
A : List[Any] =AutoTokenizer.from_pretrained(lowercase )
A : Any =load_dataset('glue', 'mrpc' )
def tokenize_function(lowercase: Any ):
# max_length=None => use the model max length (it's actually the default)
A : List[str] =tokenizer(examples['sentence1'], examples['sentence2'], truncation=lowercase, max_length=lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
A : Any =datasets.map(
lowercase, batched=lowercase, remove_columns=['idx', 'sentence1', 'sentence2'], load_from_cache_file=lowercase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A : Dict =tokenized_datasets.rename_column('label', 'labels' )
def collate_fn(lowercase: Optional[int] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowercase, padding='max_length', max_length=128, return_tensors='pt' )
return tokenizer.pad(lowercase, padding='longest', return_tensors='pt' )
# Instantiate dataloaders.
A : Union[str, Any] =DataLoader(
tokenized_datasets['train'], shuffle=lowercase, collate_fn=lowercase, batch_size=lowercase )
A : str =DataLoader(
tokenized_datasets['validation'], shuffle=lowercase, collate_fn=lowercase, batch_size=lowercase )
return train_dataloader, eval_dataloader
def A__ ( lowercase: Dict, lowercase: Optional[int], lowercase: Any, lowercase: str ) -> Tuple:
model.eval()
A : Tuple =0
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
A : Tuple =model(**lowercase )
A : Tuple =outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
A , A : Union[str, Any] =accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(lowercase ) - 1:
A : List[Any] =predictions[: len(eval_dataloader.dataset ) - samples_seen]
A : Optional[int] =references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=lowercase, references=lowercase, )
A : Union[str, Any] =metric.compute()
return eval_metric["accuracy"]
def A__ ( lowercase: Union[str, Any], lowercase: Dict ) -> List[str]:
# Initialize accelerator
A : Optional[int] =Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A : int =config['lr']
A : Optional[Any] =int(config['num_epochs'] )
A : Union[str, Any] =int(config['seed'] )
A : List[str] =int(config['batch_size'] )
A : Optional[Any] =args.model_name_or_path
set_seed(lowercase )
A , A : str =get_dataloaders(lowercase, lowercase, lowercase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A : List[str] =AutoModelForSequenceClassification.from_pretrained(lowercase, return_dict=lowercase )
# Instantiate optimizer
A : Any =(
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
A : List[str] =optimizer_cls(params=model.parameters(), lr=lowercase )
if accelerator.state.deepspeed_plugin is not None:
A : Optional[int] =accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
A : Dict =1
A : Union[str, Any] =(len(lowercase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
A : List[Any] =get_linear_schedule_with_warmup(
optimizer=lowercase, num_warmup_steps=0, num_training_steps=lowercase, )
else:
A : List[str] =DummyScheduler(lowercase, total_num_steps=lowercase, warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A , A , A , A , A : Optional[int] =accelerator.prepare(
lowercase, lowercase, lowercase, lowercase, lowercase )
# We need to keep track of how many total steps we have iterated over
A : Tuple =0
# We also need to keep track of the stating epoch so files are named properly
A : List[str] =0
A : Tuple =evaluate.load('glue', 'mrpc' )
A : Optional[int] =num_epochs
if args.partial_train_epoch is not None:
A : Dict =args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
A : List[Any] =args.resume_from_checkpoint.split('epoch_' )[1]
A : List[Any] =''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
A : Union[str, Any] =int(lowercase ) + 1
A : List[str] =evaluation_loop(lowercase, lowercase, lowercase, lowercase )
accelerator.print('resumed checkpoint performance:', lowercase )
accelerator.print('resumed checkpoint\'s scheduler\'s lr:', lr_scheduler.get_lr()[0] )
accelerator.print('resumed optimizers\'s lr:', optimizer.param_groups[0]['lr'] )
with open(os.path.join(args.output_dir, F'state_{starting_epoch-1}.json' ), 'r' ) as f:
A : Union[str, Any] =json.load(lowercase )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
A : str ={}
for epoch in range(lowercase, lowercase ):
model.train()
for step, batch in enumerate(lowercase ):
A : Tuple =model(**lowercase )
A : List[Any] =outputs.loss
A : Any =loss / gradient_accumulation_steps
accelerator.backward(lowercase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
A : Union[str, Any] =F'epoch_{epoch}'
A : Optional[Any] =os.path.join(args.output_dir, lowercase )
accelerator.save_state(lowercase )
A : Optional[Any] =evaluation_loop(lowercase, lowercase, lowercase, lowercase )
A : Dict =accuracy
A : Optional[Any] =lr_scheduler.get_lr()[0]
A : Any =optimizer.param_groups[0]['lr']
A : str =epoch
A : Dict =overall_step
accelerator.print(F'epoch {epoch}:', lowercase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir, F'state_{epoch}.json' ), 'w' ) as f:
json.dump(lowercase, lowercase )
def A__ ( ) -> Optional[int]:
A : Optional[int] =argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path', type=lowercase, default='bert-base-cased', help='Path to pretrained model or model identifier from huggingface.co/models.', required=lowercase, )
parser.add_argument(
'--output_dir', type=lowercase, default='.', help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.', )
parser.add_argument(
'--resume_from_checkpoint', type=lowercase, default=lowercase, help='If the training should continue from a checkpoint folder.', )
parser.add_argument(
'--partial_train_epoch', type=lowercase, default=lowercase, help='If passed, the training will stop after this number of epochs.', )
parser.add_argument(
'--num_epochs', type=lowercase, default=2, help='Number of train epochs.', )
A : str =parser.parse_args()
A : Optional[int] ={'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(lowercase, lowercase )
if __name__ == "__main__":
main()
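# Usage sketch (not part of the original file): the resume logic above recovers
# the epoch number from a checkpoint folder named `epoch_{n}` by scanning the
# leading digits, then restarts at the *next* epoch. Standalone (helper name is
# hypothetical):
def _starting_epoch(checkpoint_path):
    tail = checkpoint_path.split("epoch_")[1]
    digits = ""
    for ch in tail:
        if not ch.isdigit():
            break
        digits += ch
    return int(digits) + 1

assert _starting_epoch("out/epoch_11") == 12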
| 661 | 0 |
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
_lowercase : Dict =True
except ImportError:
_lowercase : Union[str, Any] =False
_lowercase : Any =logging.get_logger(__name__) # pylint: disable=invalid-name
def A__ ( lowercase: Namespace ) -> Optional[Any]:
'''simple docstring'''
return AddNewModelCommand(args.testing, args.testing_file, path=args.path )
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
@staticmethod
def SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ : ArgumentParser ) -> Optional[int]:
A : Tuple =parser.add_parser('add-new-model' )
add_new_model_parser.add_argument('--testing' , action='store_true' , help='If in testing mode.' )
add_new_model_parser.add_argument('--testing_file' , type=SCREAMING_SNAKE_CASE__ , help='Configuration file on which to run.' )
add_new_model_parser.add_argument(
'--path' , type=SCREAMING_SNAKE_CASE__ , help='Path to cookiecutter. Should only be used for testing purposes.' )
add_new_model_parser.set_defaults(func=SCREAMING_SNAKE_CASE__ )
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : bool , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str]=None , *SCREAMING_SNAKE_CASE__ : Dict ) -> Union[str, Any]:
A : Union[str, Any] =testing
A : str =testing_file
A : List[Any] =path
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> List[str]:
warnings.warn(
'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '
'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '
'checks, you should use `transformers-cli add-new-model-like` instead.' )
if not _has_cookiecutter:
raise ImportError(
'Model creation dependencies are required to use the `add_new_model` command. Install them by running '
'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
A : List[str] =[directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]]
if len(SCREAMING_SNAKE_CASE__ ) > 0:
raise ValueError(
'Several directories starting with `cookiecutter-template-` in current working directory. '
'Please clean your directory by removing all folders starting with `cookiecutter-template-` or '
'change your working directory.' )
A : Optional[int] =(
Path(SCREAMING_SNAKE_CASE__ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
A : int =path_to_transformer_root / 'templates' / 'adding_a_new_model'
# Execute cookiecutter
if not self._testing:
cookiecutter(str(SCREAMING_SNAKE_CASE__ ) )
else:
with open(self._testing_file , 'r' ) as configuration_file:
A : List[str] =json.load(SCREAMING_SNAKE_CASE__ )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=SCREAMING_SNAKE_CASE__ , extra_context=SCREAMING_SNAKE_CASE__ , )
A : Optional[Any] =[directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0]
# Retrieve configuration
with open(directory + '/configuration.json' , 'r' ) as configuration_file:
A : Union[str, Any] =json.load(SCREAMING_SNAKE_CASE__ )
A : List[str] =configuration['lowercase_modelname']
A : Tuple =configuration['generate_tensorflow_pytorch_and_flax']
os.remove(f'{directory}/configuration.json' )
A : Union[str, Any] ='PyTorch' in generate_tensorflow_pytorch_and_flax
A : List[Any] ='TensorFlow' in generate_tensorflow_pytorch_and_flax
A : str ='Flax' in generate_tensorflow_pytorch_and_flax
A : Tuple =f'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
os.makedirs(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}' , exist_ok=SCREAMING_SNAKE_CASE__ )
# Tests require submodules as they have parent imports
with open(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' , 'w' ):
pass
shutil.move(
f'{directory}/__init__.py' , f'{model_dir}/__init__.py' , )
shutil.move(
f'{directory}/configuration_{lowercase_model_name}.py' , f'{model_dir}/configuration_{lowercase_model_name}.py' , )
def remove_copy_lines(SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
with open(SCREAMING_SNAKE_CASE__ , 'r' ) as f:
A : Union[str, Any] =f.readlines()
with open(SCREAMING_SNAKE_CASE__ , 'w' ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(SCREAMING_SNAKE_CASE__ )
if output_pytorch:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_{lowercase_model_name}.py' , f'{model_dir}/modeling_{lowercase_model_name}.py' , )
shutil.move(
f'{directory}/test_modeling_{lowercase_model_name}.py' , f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' , )
else:
os.remove(f'{directory}/modeling_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_{lowercase_model_name}.py' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_tf_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_tf_{lowercase_model_name}.py' , f'{model_dir}/modeling_tf_{lowercase_model_name}.py' , )
shutil.move(
f'{directory}/test_modeling_tf_{lowercase_model_name}.py' , f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' , )
else:
os.remove(f'{directory}/modeling_tf_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
if output_flax:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_flax_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_flax_{lowercase_model_name}.py' , f'{model_dir}/modeling_flax_{lowercase_model_name}.py' , )
shutil.move(
f'{directory}/test_modeling_flax_{lowercase_model_name}.py' , f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' , )
else:
os.remove(f'{directory}/modeling_flax_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/{lowercase_model_name}.md' , f'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' , )
shutil.move(
f'{directory}/tokenization_{lowercase_model_name}.py' , f'{model_dir}/tokenization_{lowercase_model_name}.py' , )
shutil.move(
f'{directory}/tokenization_fast_{lowercase_model_name}.py' , f'{model_dir}/tokenization_{lowercase_model_name}_fast.py' , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str] ):
# Create temp file
A : Optional[int] =mkstemp()
A : Union[str, Any] =False
with fdopen(SCREAMING_SNAKE_CASE__ , 'w' ) as new_file:
with open(SCREAMING_SNAKE_CASE__ ) as old_file:
for line in old_file:
new_file.write(SCREAMING_SNAKE_CASE__ )
if line_to_copy_below in line:
A : List[Any] =True
for line_to_copy in lines_to_copy:
new_file.write(SCREAMING_SNAKE_CASE__ )
if not line_found:
raise ValueError(f'Line {line_to_copy_below} was not found in file.' )
# Copy the file permissions from the old file to the new file
copymode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Remove original file
remove(SCREAMING_SNAKE_CASE__ )
# Move new file
move(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def skip_units(SCREAMING_SNAKE_CASE__ : Any ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(SCREAMING_SNAKE_CASE__ : Tuple ):
with open(SCREAMING_SNAKE_CASE__ ) as datafile:
A : Any =[]
A : Tuple =False
A : int =False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
A : List[str] =line.split('"' )[1]
A : Union[str, Any] =skip_units(SCREAMING_SNAKE_CASE__ )
elif "# Below: " in line and "##" not in line:
A : Dict =line.split('"' )[1]
A : str =skip_units(SCREAMING_SNAKE_CASE__ )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A : Any =[]
elif "# Replace with" in line and "##" not in line:
A : Optional[int] =[]
elif "##" not in line:
lines_to_copy.append(SCREAMING_SNAKE_CASE__ )
remove(SCREAMING_SNAKE_CASE__ )
replace_in_files(f'{directory}/to_replace_{lowercase_model_name}.py' )
os.rmdir(SCREAMING_SNAKE_CASE__ )
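# Illustrative invocation of the command defined above (file path is hypothetical):
#   transformers-cli add-new-model --testing --testing_file fixtures/add_new_model.json
# The --testing/--testing_file pair replays a recorded cookiecutter configuration
# instead of prompting interactively.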
| 716 |
def A__ ( lowercase: int ) -> int:
if not isinstance(lowercase, lowercase ) or number < 0:
raise ValueError('Input must be a non-negative integer' )
A : Any =0
while number:
# This way we arrive at next set bit (next 1) instead of looping
# through each bit and checking for 1s hence the
# loop won't run 32 times it will only run the number of `1` times
number &= number - 1
count += 1
return count
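# Standalone sketch of the Kernighan trick used above (assuming the intended
# count-the-set-bits semantics): n &= n - 1 clears exactly one set bit per pass.
_n, _bits = 0b10110, 0
while _n:
    _n &= _n - 1
    _bits += 1
assert _bits == 3 # 0b10110 has three set bits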
if __name__ == "__main__":
import doctest
doctest.testmod()
| 661 | 0 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
_lowercase : Optional[Any] =logging.get_logger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
lowercase : Any = [
"no_inference",
"no_cuda",
"no_tpu",
"no_speed",
"no_memory",
"no_env_print",
"no_multi_process",
]
def __init__( self : List[str] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> str:
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
A : str =deprecated_arg[3:]
setattr(self , SCREAMING_SNAKE_CASE__ , not kwargs.pop(SCREAMING_SNAKE_CASE__ ) )
logger.warning(
                    f'{deprecated_arg} is deprecated. Please use --no_{positive_arg} or'
f' {positive_arg}={kwargs[positive_arg]}' )
A : Dict =kwargs.pop('torchscript' , self.torchscript )
A : List[str] =kwargs.pop('torch_xla_tpu_print_metrics' , self.torch_xla_tpu_print_metrics )
A : Union[str, Any] =kwargs.pop('fp16_opt_level' , self.fpaa_opt_level )
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowercase : bool = field(default=lowerCAmelCase_ , metadata={"help": "Trace the models using torchscript"} )
lowercase : bool = field(default=lowerCAmelCase_ , metadata={"help": "Print Xla/PyTorch tpu metrics"} )
lowercase : str = field(
default="O1" , metadata={
"help": (
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
"See details at https://nvidia.github.io/apex/amp.html"
)
} , )
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Tuple["torch.device", int]:
requires_backends(self , ['torch'] )
logger.info('PyTorch: setting up devices' )
if not self.cuda:
A : List[str] =torch.device('cpu' )
A : Union[str, Any] =0
elif is_torch_tpu_available():
A : Union[str, Any] =xm.xla_device()
A : Union[str, Any] =0
else:
A : List[str] =torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
A : Tuple =torch.cuda.device_count()
return device, n_gpu
@property
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Union[str, Any]:
return is_torch_tpu_available() and self.tpu
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> int:
requires_backends(self , ['torch'] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> "torch.device":
requires_backends(self , ['torch'] )
return self._setup_devices[0]
@property
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> int:
requires_backends(self , ['torch'] )
return self._setup_devices[1]
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Optional[int]:
return self.n_gpu > 0
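# Sketch of the deprecated-flag remapping handled in __init__ above (assumed
# usage): a legacy kwarg such as no_cuda=True is popped and re-set as its
# positive counterpart cuda=False before the dataclass fields are resolved,
# with a deprecation warning logged along the way.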
| 717 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def A__ ( *lowercase: Tuple, lowercase: Optional[Union[Dict, Any]] = None, lowercase: Dict=True, lowercase: Any=2 ) -> List[Any]:
from .. import __version__
A : Optional[Any] =take_from
A : Union[str, Any] =()
if not isinstance(args[0], lowercase ):
A : List[str] =(args,)
for attribute, version_name, message in args:
if version.parse(version.parse(lowercase ).base_version ) >= version.parse(lowercase ):
raise ValueError(
F'The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''
F' version {__version__} is >= {version_name}' )
A : Tuple =None
if isinstance(lowercase, lowercase ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(lowercase ),)
A : Union[str, Any] =F'The `{attribute}` argument is deprecated and will be removed in version {version_name}.'
elif hasattr(lowercase, lowercase ):
values += (getattr(lowercase, lowercase ),)
A : Optional[Any] =F'The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'
elif deprecated_kwargs is None:
A : List[Any] =F'`{attribute}` is deprecated and will be removed in version {version_name}.'
if warning is not None:
A : List[Any] =warning + ' ' if standard_warn else ''
warnings.warn(warning + message, lowercase, stacklevel=lowercase )
if isinstance(lowercase, lowercase ) and len(lowercase ) > 0:
A : Any =inspect.getouterframes(inspect.currentframe() )[1]
A : int =call_frame.filename
A : int =call_frame.lineno
A : Optional[int] =call_frame.function
A , A : int =next(iter(deprecated_kwargs.items() ) )
raise TypeError(F'{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`' )
if len(lowercase ) == 0:
return
elif len(lowercase ) == 1:
return values[0]
return values
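# Minimal usage sketch mirroring the upstream `deprecate` call pattern the
# helper above implements (attribute, version and message are hypothetical):
#   value = deprecate('scale', '1.0.0', 'Use `scaling_factor` instead.', take_from=kwargs)
# It pops the deprecated kwarg (or reads the attribute) and warns until the
# stated removal version, raising once the installed version has passed it.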
| 661 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
_lowercase : Optional[Any] ='''
Human: <<task>>
Assistant: '''
_lowercase : Union[str, Any] ='''huggingface-tools/default-prompts'''
_lowercase : Any ={'''chat''': '''chat_prompt_template.txt''', '''run''': '''run_prompt_template.txt'''}
def A__ ( lowercase: List[Any], lowercase: Dict, lowercase: str="run" ) -> List[Any]:
if prompt_or_repo_id is None:
A : List[str] =DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search('\\s', lowercase ) is not None:
return prompt_or_repo_id
A : str =cached_file(
lowercase, PROMPT_FILES[mode], repo_type='dataset', user_agent={'agent': agent_name} )
with open(lowercase, 'r', encoding='utf-8' ) as f:
return f.read()
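# Illustrative call (argument order assumed: prompt_or_repo_id, agent_name, mode):
#   template = A__(None, 'my-agent', 'chat')
# A None value falls back to the default prompts repo; a string containing
# whitespace is returned verbatim as the prompt itself; anything else is treated
# as a dataset repo ID and resolved through cached_file.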
| 718 |
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def A__ ( lowercase: int, lowercase: str ) -> Dict:
assert isinstance(lowercase, lowercase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory', [False, True] )
def A__ ( lowercase: Dict, lowercase: Tuple, lowercase: str ) -> str:
A : Any =tmp_path / 'cache'
A : Dict ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A : Dict =JsonDatasetReader(lowercase, cache_dir=lowercase, keep_in_memory=lowercase ).read()
_check_json_dataset(lowercase, lowercase )
@pytest.mark.parametrize(
'features', [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
], )
def A__ ( lowercase: Optional[int], lowercase: Any, lowercase: Union[str, Any] ) -> Tuple:
A : Tuple =tmp_path / 'cache'
A : Optional[Any] ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A : Optional[Any] =features.copy() if features else default_expected_features
A : Union[str, Any] =(
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
A : str =JsonDatasetReader(lowercase, features=lowercase, cache_dir=lowercase ).read()
_check_json_dataset(lowercase, lowercase )
@pytest.mark.parametrize(
'features', [
None,
{'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'},
], )
def A__ ( lowercase: Optional[int], lowercase: str, lowercase: Dict ) -> Optional[int]:
A : int =tmp_path / 'cache'
A : Tuple ={'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'}
A : int =features.copy() if features else default_expected_features
A : str =(
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
A : Optional[int] =JsonDatasetReader(lowercase, features=lowercase, cache_dir=lowercase ).read()
assert isinstance(lowercase, lowercase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def A__ ( lowercase: Optional[Any], lowercase: str ) -> Tuple:
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
A : str ={'col_2': 'int64', 'col_3': 'float64', 'col_1': 'string'}
A : Dict =features.copy()
A : List[str] =(
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
A : int =tmp_path / 'cache'
A : Optional[int] =JsonDatasetReader(lowercase, features=lowercase, cache_dir=lowercase ).read()
assert isinstance(lowercase, lowercase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('split', [None, NamedSplit('train' ), 'train', 'test'] )
def A__ ( lowercase: Union[str, Any], lowercase: Any, lowercase: str ) -> Optional[Any]:
A : Optional[int] =tmp_path / 'cache'
A : Optional[Any] ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A : str =JsonDatasetReader(lowercase, cache_dir=lowercase, split=lowercase ).read()
_check_json_dataset(lowercase, lowercase )
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize('path_type', [str, list] )
def A__ ( lowercase: Optional[Any], lowercase: int, lowercase: Union[str, Any] ) -> List[Any]:
if issubclass(lowercase, lowercase ):
A : int =jsonl_path
elif issubclass(lowercase, lowercase ):
A : Any =[jsonl_path]
A : Optional[Any] =tmp_path / 'cache'
A : Tuple ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A : List[str] =JsonDatasetReader(lowercase, cache_dir=lowercase ).read()
_check_json_dataset(lowercase, lowercase )
def A__ ( lowercase: List[str], lowercase: Tuple, lowercase: Optional[Any]=("train",) ) -> Tuple:
assert isinstance(lowercase, lowercase )
for split in splits:
A : List[str] =dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory', [False, True] )
def A__ ( lowercase: Tuple, lowercase: Optional[int], lowercase: Any ) -> str:
A : List[str] =tmp_path / 'cache'
A : Union[str, Any] ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A : str =JsonDatasetReader({'train': jsonl_path}, cache_dir=lowercase, keep_in_memory=lowercase ).read()
_check_json_datasetdict(lowercase, lowercase )
@pytest.mark.parametrize(
'features', [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
], )
def A__ ( lowercase: Optional[int], lowercase: Optional[int], lowercase: Optional[int] ) -> Tuple:
A : Any =tmp_path / 'cache'
A : List[str] ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A : str =features.copy() if features else default_expected_features
A : Dict =(
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
A : Optional[Any] =JsonDatasetReader({'train': jsonl_path}, features=lowercase, cache_dir=lowercase ).read()
_check_json_datasetdict(lowercase, lowercase )
@pytest.mark.parametrize('split', [None, NamedSplit('train' ), 'train', 'test'] )
def A__ ( lowercase: Any, lowercase: List[Any], lowercase: List[Any] ) -> Tuple:
if split:
A : Optional[int] ={split: jsonl_path}
else:
A : Dict ='train'
A : Optional[Any] ={'train': jsonl_path, 'test': jsonl_path}
A : Tuple =tmp_path / 'cache'
A : List[str] ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A : List[Any] =JsonDatasetReader(lowercase, cache_dir=lowercase ).read()
_check_json_datasetdict(lowercase, lowercase, splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def A__ ( lowercase: List[Any] ) -> Tuple:
return json.load(lowercase )
def A__ ( lowercase: List[Any] ) -> Tuple:
    return [json.loads(line ) for line in buffer]
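# Illustrative round trip for the two loaders above: a JSON Lines buffer yields
# one dict per line, a plain JSON buffer a single object (values assumed).
#   io.BytesIO(b'{"a": 1}\n{"a": 2}\n') -> [{"a": 1}, {"a": 2}]
#   io.BytesIO(b'[{"a": 1}, {"a": 2}]') -> [{"a": 1}, {"a": 2}]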
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Any:
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , lines=SCREAMING_SNAKE_CASE__ ).write()
buffer.seek(0 )
A : int =load_json_function(SCREAMING_SNAKE_CASE__ )
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert isinstance(exported_content[0] , SCREAMING_SNAKE_CASE__ )
assert len(SCREAMING_SNAKE_CASE__ ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any ) -> Optional[Any]:
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , lines=SCREAMING_SNAKE_CASE__ , orient=SCREAMING_SNAKE_CASE__ ).write()
buffer.seek(0 )
A : Any =load_json(SCREAMING_SNAKE_CASE__ )
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(SCREAMING_SNAKE_CASE__ , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(SCREAMING_SNAKE_CASE__ ) == 10
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[int]:
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , lines=SCREAMING_SNAKE_CASE__ , num_proc=2 ).write()
buffer.seek(0 )
A : int =load_json_function(SCREAMING_SNAKE_CASE__ )
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert isinstance(exported_content[0] , SCREAMING_SNAKE_CASE__ )
assert len(SCREAMING_SNAKE_CASE__ ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[Any]:
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , lines=SCREAMING_SNAKE_CASE__ , orient=SCREAMING_SNAKE_CASE__ , num_proc=2 ).write()
buffer.seek(0 )
A : List[Any] =load_json(SCREAMING_SNAKE_CASE__ )
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(SCREAMING_SNAKE_CASE__ , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(SCREAMING_SNAKE_CASE__ ) == 10
def SCREAMING_SNAKE_CASE_ ( self : str , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> List[Any]:
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , num_proc=0 )
@pytest.mark.parametrize('compression, extension' , [('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')] )
def SCREAMING_SNAKE_CASE_ ( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict ) -> str:
A : Union[str, Any] =tmp_path_factory.mktemp('data' ) / f'test.json.{extension}'
A : Union[str, Any] =str(shared_datadir / f'test_file.json.{extension}' )
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , compression=SCREAMING_SNAKE_CASE__ ).write()
with fsspec.open(SCREAMING_SNAKE_CASE__ , 'rb' , compression='infer' ) as f:
A : str =f.read()
with fsspec.open(SCREAMING_SNAKE_CASE__ , 'rb' , compression='infer' ) as f:
A : List[str] =f.read()
assert exported_content == original_content
| 661 | 0 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def A__ ( lowercase: List[Any], lowercase: Optional[int] ) -> Optional[Any]:
A : List[str] =checkpoint
A : int ={}
A : str =vae_state_dict['encoder.conv_in.weight']
A : Any =vae_state_dict['encoder.conv_in.bias']
A : str =vae_state_dict['encoder.conv_out.weight']
A : Optional[int] =vae_state_dict['encoder.conv_out.bias']
A : str =vae_state_dict['encoder.norm_out.weight']
A : List[Any] =vae_state_dict['encoder.norm_out.bias']
A : List[str] =vae_state_dict['decoder.conv_in.weight']
A : Dict =vae_state_dict['decoder.conv_in.bias']
A : Optional[int] =vae_state_dict['decoder.conv_out.weight']
A : Tuple =vae_state_dict['decoder.conv_out.bias']
A : Any =vae_state_dict['decoder.norm_out.weight']
A : Optional[Any] =vae_state_dict['decoder.norm_out.bias']
A : Union[str, Any] =vae_state_dict['quant_conv.weight']
A : str =vae_state_dict['quant_conv.bias']
A : Tuple =vae_state_dict['post_quant_conv.weight']
A : Optional[Any] =vae_state_dict['post_quant_conv.bias']
# Retrieves the keys for the encoder down blocks only
A : List[str] =len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'encoder.down' in layer} )
A : List[str] ={
layer_id: [key for key in vae_state_dict if F'down.{layer_id}' in key] for layer_id in range(lowercase )
}
# Retrieves the keys for the decoder up blocks only
A : List[Any] =len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'decoder.up' in layer} )
A : Any ={
layer_id: [key for key in vae_state_dict if F'up.{layer_id}' in key] for layer_id in range(lowercase )
}
for i in range(lowercase ):
A : List[str] =[key for key in down_blocks[i] if F'down.{i}' in key and F'down.{i}.downsample' not in key]
if F'encoder.down.{i}.downsample.conv.weight' in vae_state_dict:
A : Union[str, Any] =vae_state_dict.pop(
F'encoder.down.{i}.downsample.conv.weight' )
A : Dict =vae_state_dict.pop(
F'encoder.down.{i}.downsample.conv.bias' )
A : List[str] =renew_vae_resnet_paths(lowercase )
A : Union[str, Any] ={'old': F'down.{i}.block', 'new': F'down_blocks.{i}.resnets'}
assign_to_checkpoint(lowercase, lowercase, lowercase, additional_replacements=[meta_path], config=lowercase )
A : str =[key for key in vae_state_dict if 'encoder.mid.block' in key]
A : int =2
for i in range(1, num_mid_res_blocks + 1 ):
A : Optional[Any] =[key for key in mid_resnets if F'encoder.mid.block_{i}' in key]
A : List[Any] =renew_vae_resnet_paths(lowercase )
A : List[str] ={'old': F'mid.block_{i}', 'new': F'mid_block.resnets.{i - 1}'}
assign_to_checkpoint(lowercase, lowercase, lowercase, additional_replacements=[meta_path], config=lowercase )
A : int =[key for key in vae_state_dict if 'encoder.mid.attn' in key]
A : List[Any] =renew_vae_attention_paths(lowercase )
A : List[str] ={'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
assign_to_checkpoint(lowercase, lowercase, lowercase, additional_replacements=[meta_path], config=lowercase )
conv_attn_to_linear(lowercase )
for i in range(lowercase ):
A : List[str] =num_up_blocks - 1 - i
A : Any =[
key for key in up_blocks[block_id] if F'up.{block_id}' in key and F'up.{block_id}.upsample' not in key
]
if F'decoder.up.{block_id}.upsample.conv.weight' in vae_state_dict:
A : Union[str, Any] =vae_state_dict[
F'decoder.up.{block_id}.upsample.conv.weight'
]
A : int =vae_state_dict[
F'decoder.up.{block_id}.upsample.conv.bias'
]
A : Tuple =renew_vae_resnet_paths(lowercase )
A : str ={'old': F'up.{block_id}.block', 'new': F'up_blocks.{i}.resnets'}
assign_to_checkpoint(lowercase, lowercase, lowercase, additional_replacements=[meta_path], config=lowercase )
A : int =[key for key in vae_state_dict if 'decoder.mid.block' in key]
A : List[Any] =2
for i in range(1, num_mid_res_blocks + 1 ):
A : Union[str, Any] =[key for key in mid_resnets if F'decoder.mid.block_{i}' in key]
A : List[str] =renew_vae_resnet_paths(lowercase )
A : Any ={'old': F'mid.block_{i}', 'new': F'mid_block.resnets.{i - 1}'}
assign_to_checkpoint(lowercase, lowercase, lowercase, additional_replacements=[meta_path], config=lowercase )
A : Dict =[key for key in vae_state_dict if 'decoder.mid.attn' in key]
A : Union[str, Any] =renew_vae_attention_paths(lowercase )
A : Union[str, Any] ={'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
assign_to_checkpoint(lowercase, lowercase, lowercase, additional_replacements=[meta_path], config=lowercase )
conv_attn_to_linear(lowercase )
return new_checkpoint
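# Example of the key renaming performed above (illustrative): an LDM key such as
# 'encoder.down.0.block.0.norm1.weight' lands in the diffusers layout as
# 'encoder.down_blocks.0.resnets.0.norm1.weight' via renew_vae_resnet_paths plus
# the {'old': ..., 'new': ...} meta paths fed to assign_to_checkpoint.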
def A__ ( lowercase: str, lowercase: str, ) -> Optional[Any]:
# Only support V1
A : str =requests.get(
        'https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml' )
A : Union[str, Any] =io.BytesIO(r.content )
A : Union[str, Any] =OmegaConf.load(lowercase )
A : str =512
A : Optional[int] ='cuda' if torch.cuda.is_available() else 'cpu'
if checkpoint_path.endswith('safetensors' ):
from safetensors import safe_open
A : int ={}
with safe_open(lowercase, framework='pt', device='cpu' ) as f:
for key in f.keys():
A : str =f.get_tensor(lowercase )
else:
A : Any =torch.load(lowercase, map_location=lowercase )['state_dict']
# Convert the VAE model.
A : Optional[Any] =create_vae_diffusers_config(lowercase, image_size=lowercase )
A : Any =custom_convert_ldm_vae_checkpoint(lowercase, lowercase )
A : Optional[Any] =AutoencoderKL(**lowercase )
vae.load_state_dict(lowercase )
vae.save_pretrained(lowercase )
if __name__ == "__main__":
_lowercase : Optional[int] =argparse.ArgumentParser()
parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
    parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path where the converted VAE will be saved.''')
_lowercase : int =parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 719 |
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
lowercase : Optional[int] = DDIMPipeline
lowercase : int = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
lowercase : Optional[Any] = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"latents",
"callback",
"callback_steps",
}
lowercase : Optional[Any] = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
lowercase : Union[str, Any] = False
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Optional[int]:
torch.manual_seed(0 )
A : str =UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
A : Optional[int] =DDIMScheduler()
A : Optional[Any] ={'unet': unet, 'scheduler': scheduler}
return components
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any]=0 ) -> Any:
if str(SCREAMING_SNAKE_CASE__ ).startswith('mps' ):
A : List[Any] =torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
A : Union[str, Any] =torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
A : Optional[int] ={
'batch_size': 1,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> List[Any]:
A : Union[str, Any] ='cpu'
A : Tuple =self.get_dummy_components()
A : Union[str, Any] =self.pipeline_class(**SCREAMING_SNAKE_CASE__ )
pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
A : str =self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
A : str =pipe(**SCREAMING_SNAKE_CASE__ ).images
A : Optional[Any] =image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
A : Optional[Any] =np.array(
[1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04] )
A : str =np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(SCREAMING_SNAKE_CASE__ , 1e-3 )
def SCREAMING_SNAKE_CASE_ ( self : int ) -> Dict:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> List[Any]:
super().test_save_load_local(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Tuple:
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Tuple:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Dict:
A : Any ='google/ddpm-cifar10-32'
A : Optional[int] =UNetaDModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : Tuple =DDIMScheduler()
A : int =DDIMPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
ddim.to(SCREAMING_SNAKE_CASE__ )
ddim.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
A : Dict =torch.manual_seed(0 )
A : Optional[Any] =ddim(generator=SCREAMING_SNAKE_CASE__ , eta=0.0 , output_type='numpy' ).images
A : str =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A : Tuple =np.array([0.1_7_2_3, 0.1_6_1_7, 0.1_6_0_0, 0.1_6_2_6, 0.1_4_9_7, 0.1_5_1_3, 0.1_5_0_5, 0.1_4_4_2, 0.1_4_5_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> int:
A : Optional[int] ='google/ddpm-ema-bedroom-256'
A : str =UNetaDModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : str =DDIMScheduler.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : Tuple =DDIMPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
ddpm.to(SCREAMING_SNAKE_CASE__ )
ddpm.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
A : Any =torch.manual_seed(0 )
A : Optional[int] =ddpm(generator=SCREAMING_SNAKE_CASE__ , output_type='numpy' ).images
A : List[Any] =image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
A : Optional[int] =np.array([0.0_0_6_0, 0.0_2_0_1, 0.0_3_4_4, 0.0_0_2_4, 0.0_0_1_8, 0.0_0_0_2, 0.0_0_2_2, 0.0_0_0_0, 0.0_0_6_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 661 | 0 |
'''simple docstring'''
_lowercase : List[Any] ='''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the latest release, comment out the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
_lowercase : Tuple =[{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
_lowercase : List[Any] ={
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 720 |
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Union[str, Any]:
A : Dict =tempfile.mkdtemp()
A : int =SamImageProcessor()
A : Union[str, Any] =SamProcessor(SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[int]:
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ).image_processor
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Any:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Optional[int]:
A : str =[np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
A : Optional[int] =[Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Tuple:
A : Optional[int] =SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A : str =self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 )
A : Union[str, Any] =SamProcessor.from_pretrained(self.tmpdirname , do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Optional[int]:
A : Optional[Any] =self.get_image_processor()
A : Optional[Any] =SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
A : Dict =self.prepare_image_inputs()
A : Optional[int] =image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='np' )
A : Optional[Any] =processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : int ) -> Any:
A : str =self.get_image_processor()
A : Union[str, Any] =SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
A : str =[torch.ones((1, 3, 5, 5) )]
A : Optional[Any] =[[17_64, 26_46]]
A : List[Any] =[[6_83, 10_24]]
A : Union[str, Any] =processor.post_process_masks(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
A : Any =processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , torch.tensor(SCREAMING_SNAKE_CASE__ ) , torch.tensor(SCREAMING_SNAKE_CASE__ ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
A : str =[np.ones((1, 3, 5, 5) )]
A : int =processor.post_process_masks(SCREAMING_SNAKE_CASE__ , np.array(SCREAMING_SNAKE_CASE__ ) , np.array(SCREAMING_SNAKE_CASE__ ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
A : Any =[[1, 0], [0, 1]]
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
A : Any =processor.post_process_masks(SCREAMING_SNAKE_CASE__ , np.array(SCREAMING_SNAKE_CASE__ ) , np.array(SCREAMING_SNAKE_CASE__ ) )
@require_vision
@require_tf
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( self : str ) -> str:
A : Tuple =tempfile.mkdtemp()
A : Union[str, Any] =SamImageProcessor()
A : Union[str, Any] =SamProcessor(SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : int , **SCREAMING_SNAKE_CASE__ : str ) -> Union[str, Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ).image_processor
def SCREAMING_SNAKE_CASE_ ( self : str ) -> List[str]:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Tuple:
A : Optional[Any] =[np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
A : Any =[Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> List[str]:
A : Optional[Any] =SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A : Optional[Any] =self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 )
A : Dict =SamProcessor.from_pretrained(self.tmpdirname , do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Any:
A : Any =self.get_image_processor()
A : Any =SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
A : int =self.prepare_image_inputs()
A : Tuple =image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='np' )
A : List[Any] =processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_tf
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Tuple:
A : int =self.get_image_processor()
A : Any =SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
A : Optional[int] =[tf.ones((1, 3, 5, 5) )]
A : Tuple =[[17_64, 26_46]]
A : Union[str, Any] =[[6_83, 10_24]]
A : int =processor.post_process_masks(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_tensors='tf' )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
A : List[Any] =processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ ) , tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ ) , return_tensors='tf' , )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
A : Any =[np.ones((1, 3, 5, 5) )]
A : Optional[Any] =processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , np.array(SCREAMING_SNAKE_CASE__ ) , np.array(SCREAMING_SNAKE_CASE__ ) , return_tensors='tf' )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
A : Any =[[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
A : List[str] =processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , np.array(SCREAMING_SNAKE_CASE__ ) , np.array(SCREAMING_SNAKE_CASE__ ) , return_tensors='tf' )
@require_vision
@require_torchvision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Union[str, Any]:
A : Optional[int] =tempfile.mkdtemp()
A : Union[str, Any] =SamImageProcessor()
A : Dict =SamProcessor(SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : int , **SCREAMING_SNAKE_CASE__ : List[str] ) -> Any:
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ).image_processor
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Any:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Tuple:
A : Any =[np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
A : Tuple =[Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> List[str]:
A : Optional[Any] =self.get_image_processor()
A : Dict =SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
A : Optional[int] =np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
A : Optional[int] =[tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ )]
A : Union[str, Any] =[torch.tensor(SCREAMING_SNAKE_CASE__ )]
A : int =[[17_64, 26_46]]
A : int =[[6_83, 10_24]]
A : Dict =processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_tensors='tf' )
A : Optional[Any] =processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Any:
A : Union[str, Any] =self.get_image_processor()
A : int =SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
A : int =self.prepare_image_inputs()
A : List[Any] =image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='pt' )['pixel_values'].numpy()
A : Tuple =processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )['pixel_values'].numpy()
A : Optional[int] =image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='tf' )['pixel_values'].numpy()
A : Dict =processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='tf' )['pixel_values'].numpy()
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
| 661 | 0 |
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
@property
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> int:
torch.manual_seed(0 )
A : Optional[int] =UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> str:
A : Dict =self.dummy_uncond_unet
A : int =ScoreSdeVeScheduler()
A : Union[str, Any] =ScoreSdeVePipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
sde_ve.to(SCREAMING_SNAKE_CASE__ )
sde_ve.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
A : Union[str, Any] =torch.manual_seed(0 )
A : Dict =sde_ve(num_inference_steps=2 , output_type='numpy' , generator=SCREAMING_SNAKE_CASE__ ).images
A : List[Any] =torch.manual_seed(0 )
A : List[str] =sde_ve(num_inference_steps=2 , output_type='numpy' , generator=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ )[
0
]
A : List[Any] =image[0, -3:, -3:, -1]
A : List[Any] =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A : str =np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Union[str, Any]:
A : int ='google/ncsnpp-church-256'
A : Dict =UNetaDModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : str =ScoreSdeVeScheduler.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : str =ScoreSdeVePipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
sde_ve.to(SCREAMING_SNAKE_CASE__ )
sde_ve.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
A : Any =torch.manual_seed(0 )
A : List[str] =sde_ve(num_inference_steps=10 , output_type='numpy' , generator=SCREAMING_SNAKE_CASE__ ).images
A : Dict =image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
A : Dict =np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 721 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
_lowercase : Optional[Any] =WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN'''])
def A__ ( lowercase: Optional[int] ) -> Optional[int]:
A : str =test_results.split(' ' )
A : List[str] =0
A : Tuple =0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
A : List[str] =expressions[-2] if '=' in expressions[-1] else expressions[-1]
for i, expression in enumerate(lowercase ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
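# Worked example (report shape assumed): for '== 10 failed, 90 passed in 1h30m =='
# the token scan returns failed=10, success=90 and time_spent='1h30m'; the
# trailing '=' sentinel is what shifts the time token to expressions[-2].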
def A__ ( lowercase: List[Any] ) -> str:
A : Union[str, Any] ={}
A : Optional[Any] =None
A : Union[str, Any] =False
for line in failures_short_lines.split('\n' ):
if re.search(r'_ \[doctest\]', lowercase ):
A : List[Any] =True
A : Any =line.split(' ' )[2]
elif in_error and not line.split(' ' )[0].isdigit():
A : Dict =line
A : List[str] =False
return failures
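# Illustrative parse (failures_short layout assumed): a separator line matching
# '_ [doctest]' names the failing test, and the first subsequent line whose
# leading token is not a digit is stored as that test's first error line.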
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
def __init__( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict ) -> List[str]:
A : Tuple =title
A : Dict =doc_test_results['time_spent'].split(',' )[0]
A : Union[str, Any] =doc_test_results['success']
A : Any =doc_test_results['failures']
A : Optional[Any] =self.n_success + self.n_failures
# Failures and success of the modeling tests
A : Union[str, Any] =doc_test_results
@property
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> str:
A : Any =[self._time_spent]
A : List[str] =0
for time in time_spent:
A : List[Any] =time.split(':' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(SCREAMING_SNAKE_CASE__ ) == 1:
A : List[str] =[0, 0, time_parts[0]]
A , A , A : Tuple =int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 36_00 + minutes * 60 + seconds
A , A , A : str =total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
return f'{int(SCREAMING_SNAKE_CASE__ )}h{int(SCREAMING_SNAKE_CASE__ )}m{int(SCREAMING_SNAKE_CASE__ )}s'
@property
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Dict:
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
                    f'There were {self.n_failures} failures out of {self.n_tests} tests.\nThe suite ran in'
f' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Dict:
A : Tuple =40
A : Optional[Any] ={k: v['failed'] for k, v in doc_test_results.items() if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}
A : Any =''
for category, failures in category_failures.items():
if len(SCREAMING_SNAKE_CASE__ ) == 0:
continue
if report != "":
report += "\n\n"
report += f'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(SCREAMING_SNAKE_CASE__ )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'The following examples had failures:\n\n\n{report}\n',
},
}
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> str:
A : Optional[int] =[self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(SCREAMING_SNAKE_CASE__ )
@staticmethod
def SCREAMING_SNAKE_CASE_ ( ) -> Optional[Any]:
A : Tuple =[
{
'type': 'section',
'text': {
'type': 'plain_text',
'text': 'There was an issue running the tests.',
},
'accessory': {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True},
'url': f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
]
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(SCREAMING_SNAKE_CASE__ )} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text='There was an issue running the tests.' , blocks=SCREAMING_SNAKE_CASE__ , )
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Optional[int]:
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(self.payload )} ) )
A : Any =f'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else 'All tests passed.'
A : Dict =client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , blocks=self.payload , text=SCREAMING_SNAKE_CASE__ , )
def SCREAMING_SNAKE_CASE_ ( self : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[int]:
A : List[str] =''
for key, value in failures.items():
A : Any =value[:2_00] + ' [Truncated]' if len(SCREAMING_SNAKE_CASE__ ) > 2_50 else value
failures_text += f'*{key}*\n_{value}_\n\n'
A : Union[str, Any] =job_name
A : Any ={'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}}
if job_link is not None:
A : int ={
'type': 'button',
'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True},
'url': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> List[Any]:
if self.thread_ts is None:
raise ValueError('Can only post reply if a post has been made.' )
A : Union[str, Any] =self.doc_test_results.pop('job_link' )
self.doc_test_results.pop('failures' )
self.doc_test_results.pop('success' )
self.doc_test_results.pop('time_spent' )
A : Union[str, Any] =sorted(self.doc_test_results.items() , key=lambda SCREAMING_SNAKE_CASE__ : t[0] )
for job, job_result in sorted_dict:
if len(job_result['failures'] ):
                A : Any =f'*Num failures*: {len(job_result["failed"] )}\n'
A : List[Any] =job_result['failures']
A : Any =self.get_reply_blocks(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , text=SCREAMING_SNAKE_CASE__ )
print('Sending the following reply' )
print(json.dumps({'blocks': blocks} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text=f'Results for {job}' , blocks=SCREAMING_SNAKE_CASE__ , thread_ts=self.thread_ts['ts'] , )
time.sleep(1 )
def A__ ( ) -> Union[str, Any]:
A : Any =os.environ['GITHUB_RUN_ID']
A : List[Any] =F'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'
A : Union[str, Any] =requests.get(lowercase ).json()
A : List[Any] ={}
try:
jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
A : List[str] =math.ceil((result['total_count'] - 100) / 100 )
for i in range(lowercase ):
A : List[str] =requests.get(url + F'&page={i + 2}' ).json()
jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
return jobs
except Exception as e:
print('Unknown error, could not fetch links.', lowercase )
return {}
def A__ ( lowercase: str ) -> Optional[Any]:
A : Any ={}
if os.path.exists(lowercase ):
A : List[Any] =os.listdir(lowercase )
for file in files:
try:
with open(os.path.join(lowercase, lowercase ), encoding='utf-8' ) as f:
A : Optional[int] =f.read()
except UnicodeDecodeError as e:
raise ValueError(F'Could not open {os.path.join(lowercase, lowercase )}.' ) from e
return _artifact
def A__ ( ) -> int:
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : str ) -> List[str]:
A : Dict =name
A : Dict =[]
def __str__( self : Optional[Any] ) -> List[str]:
return self.name
def SCREAMING_SNAKE_CASE_ ( self : int , SCREAMING_SNAKE_CASE__ : str ) -> List[Any]:
self.paths.append({'name': self.name, 'path': path} )
A : Dict[str, Artifact] ={}
A : str =filter(os.path.isdir, os.listdir() )
for directory in directories:
A : Tuple =directory
if artifact_name not in _available_artifacts:
A : int =Artifact(lowercase )
_available_artifacts[artifact_name].add_path(lowercase )
return _available_artifacts
if __name__ == "__main__":
_lowercase : Optional[int] =get_job_links()
_lowercase : str =retrieve_available_artifacts()
_lowercase : List[Any] =collections.OrderedDict(
[
('''*.py''', '''API Examples'''),
('''*.md''', '''MD Examples'''),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
_lowercase : Optional[Any] ={
v: {
'''failed''': [],
'''failures''': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
_lowercase : List[Any] =github_actions_job_links.get('''run_doctests''')
_lowercase : int =available_artifacts['''doc_tests_gpu_test_reports'''].paths[0]
_lowercase : Dict =retrieve_artifact(artifact_path['''name'''])
if "stats" in artifact:
_lowercase , _lowercase , _lowercase : List[Any] =handle_test_results(artifact['''stats'''])
_lowercase : Any =failed
_lowercase : Union[str, Any] =success
_lowercase : str =time_spent[1:-1] + ''', '''
_lowercase : Any =extract_first_line_failure(artifact['''failures_short'''])
for line in artifact["summary_short"].split('''\n'''):
if re.search('''FAILED''', line):
_lowercase : Tuple =line.replace('''FAILED ''', '''''')
_lowercase : int =line.split()[0].replace('''\n''', '''''')
if "::" in line:
_lowercase , _lowercase : str =line.split('''::''')
else:
_lowercase , _lowercase : Union[str, Any] =line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
_lowercase : Any =docs[file_regex]
doc_test_results[category]["failed"].append(test)
_lowercase : Any =all_failures[test] if test in all_failures else '''N/A'''
_lowercase : Tuple =failure
break
_lowercase : Optional[int] =Message('''🤗 Results of the doc tests.''', doc_test_results)
message.post()
message.post_reply()
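# The category routing above, in isolation: each failed test's file path is
# matched against the glob patterns in `docs` to pick a report bucket.
# Minimal sketch with illustrative paths:
from fnmatch import fnmatch

docs_sketch = {'*.py': 'API Examples', '*.md': 'MD Examples'}
for file_path in ['src/modeling_bert.py', 'docs/quicktour.md']:
    category = next(v for k, v in docs_sketch.items() if fnmatch(file_path, k))
    print(file_path, '->', category)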
| 661 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase : Dict ={
'''configuration_clap''': [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapAudioConfig''',
'''ClapConfig''',
'''ClapTextConfig''',
],
'''processing_clap''': ['''ClapProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any =[
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapModel''',
'''ClapPreTrainedModel''',
'''ClapTextModel''',
'''ClapTextModelWithProjection''',
'''ClapAudioModel''',
'''ClapAudioModelWithProjection''',
]
_lowercase : Optional[Any] =['''ClapFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
_lowercase : str =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
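# How the lazy-import pattern above behaves at runtime: the module object is
# swapped for a proxy, and attribute access triggers the real submodule
# import. A minimal standalone sketch of the idea (not the transformers
# implementation; names here are illustrative):
import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map every exported attribute to the submodule that defines it.
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        module = importlib.import_module('.' + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)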
| 700 |
_lowercase : Dict ='''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 661 | 0 |
from pathlib import Path
import numpy as np
from PIL import Image
def A__ ( lowercase: np.ndarray ) -> np.ndarray:
A : Dict =rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
return 0.29_89 * r + 0.58_70 * g + 0.11_40 * b
def A__ ( lowercase: np.ndarray ) -> np.ndarray:
return (gray > 127) & (gray <= 255)
def A__ ( lowercase: np.ndarray, lowercase: np.ndarray ) -> np.ndarray:
A : int =np.zeros_like(lowercase )
A : Optional[int] =np.zeros(
(image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
# Copy image to padded image
A : Optional[int] =image
# Iterate over image & apply kernel
for x in range(image.shape[1] ):
for y in range(image.shape[0] ):
A : str =(
kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
).sum()
A : List[Any] =int(summation > 0 )
return output
if __name__ == "__main__":
# read original image
_lowercase : Any =Path(__file__).resolve().parent / '''image_data''' / '''lena.jpg'''
_lowercase : Tuple =np.array(Image.open(lena_path))
# kernel to be applied
_lowercase : Dict =np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
_lowercase : Optional[int] =dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
_lowercase : List[str] =Image.fromarray(output).convert('''RGB''')
pil_img.save('''result_dilation.png''')
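# A self-contained, readable sketch of the binary dilation above: an output
# pixel turns on when the kernel overlaps any foreground pixel. This variant
# pads symmetrically (assuming an odd-sized kernel) and uses a toy input so
# it runs without the lena.jpg asset.
import numpy as np

def dilation_sketch(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    pad_y, pad_x = kernel.shape[0] // 2, kernel.shape[1] // 2
    padded = np.pad(image, ((pad_y, pad_y), (pad_x, pad_x)))
    out = np.zeros_like(image)
    for y in range(image.shape[0]):
        for x in range(image.shape[1]):
            window = padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            out[y, x] = int((kernel * window).sum() > 0)
    return out

toy = np.zeros((5, 5), dtype=int)
toy[2, 2] = 1
cross = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
print(dilation_sketch(toy, cross))  # the single centre pixel grows into a cross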
| 701 |
from typing import List
from .keymap import KEYMAP, get_character
def A__ ( lowercase: str ) -> List[str]:
def decorator(lowercase: int ):
A : Tuple =getattr(lowercase, 'handle_key', [] )
handle += [key]
setattr(lowercase, 'handle_key', lowercase )
return func
return decorator
def A__ ( *lowercase: List[str] ) -> Dict:
def decorator(lowercase: Union[str, Any] ):
A : Optional[int] =getattr(lowercase, 'handle_key', [] )
handle += keys
setattr(lowercase, 'handle_key', lowercase )
return func
return decorator
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
def __new__( cls : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> Any:
A : Dict =super().__new__(cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if not hasattr(SCREAMING_SNAKE_CASE__ , 'key_handler' ):
setattr(SCREAMING_SNAKE_CASE__ , 'key_handler' , {} )
setattr(SCREAMING_SNAKE_CASE__ , 'handle_input' , KeyHandler.handle_input )
for value in attrs.values():
A : Optional[Any] =getattr(SCREAMING_SNAKE_CASE__ , 'handle_key' , [] )
for key in handled_keys:
A : str =value
return new_cls
@staticmethod
def SCREAMING_SNAKE_CASE_ ( cls : str ) -> Any:
A : str =get_character()
if char != KEYMAP["undefined"]:
A : List[str] =ord(SCREAMING_SNAKE_CASE__ )
A : List[str] =cls.key_handler.get(SCREAMING_SNAKE_CASE__ )
if handler:
A : List[str] =char
return handler(cls )
else:
return None
def A__ ( cls: Optional[int] ) -> str:
return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy() )
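# The decorator/metaclass pair above implements a key-dispatch registry:
# methods tagged via `handle_key` are collected into `key_handler` when the
# class is created, and one keypress is routed to the matching method. A
# minimal standalone sketch of the same pattern (no terminal I/O):
def mark(key):
    def decorator(func):
        handle = getattr(func, 'handle_key', [])
        handle += [key]
        setattr(func, 'handle_key', handle)
        return func
    return decorator

class DispatcherSketch(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        new_cls.key_handler = {}
        for value in attrs.values():
            for key in getattr(value, 'handle_key', []):
                new_cls.key_handler[key] = value
        return new_cls

class Menu(metaclass=DispatcherSketch):
    @mark('j')
    def move_down(self):
        return 'down'

    @mark('k')
    def move_up(self):
        return 'up'

print(Menu.key_handler['j'](Menu()))  # -> down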
| 661 | 0 |
def A__ ( lowercase: int, lowercase: list ) -> Dict:
_enforce_args(lowercase, lowercase )
if n == 0:
return 0
A : Optional[int] =float('-inf' )
for i in range(1, n + 1 ):
A : Union[str, Any] =max(
lowercase, prices[i - 1] + naive_cut_rod_recursive(n - i, lowercase ) )
return max_revue
def A__ ( lowercase: int, lowercase: list ) -> Optional[Any]:
_enforce_args(lowercase, lowercase )
A : Dict =[float('-inf' ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(lowercase, lowercase, lowercase )
def A__ ( lowercase: int, lowercase: list, lowercase: list ) -> Optional[Any]:
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
A : Dict =float('-inf' )
for i in range(1, n + 1 ):
A : int =max(
lowercase, prices[i - 1] + _top_down_cut_rod_recursive(n - i, lowercase, lowercase ), )
A : str =max_revenue
return max_rev[n]
def A__ ( lowercase: int, lowercase: list ) -> Optional[int]:
_enforce_args(lowercase, lowercase )
# length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
# length 0.
A : Dict =[float('-inf' ) for _ in range(n + 1 )]
A : List[Any] =0
for i in range(1, n + 1 ):
A : Dict =max_rev[i]
for j in range(1, i + 1 ):
A : List[str] =max(lowercase, prices[j - 1] + max_rev[i - j] )
A : int =max_revenue_i
return max_rev[n]
def A__ ( lowercase: int, lowercase: list ) -> int:
if n < 0:
A : Tuple =F'n must be greater than or equal to 0. Got n = {n}'
raise ValueError(lowercase )
if n > len(lowercase ):
A : Dict =(
'Each integral piece of rod must have a corresponding price. '
F'Got n = {n} but length of prices = {len(lowercase )}'
)
raise ValueError(lowercase )
def A__ ( ) -> List[str]:
A : Optional[Any] =[6, 10, 12, 15, 20, 23]
A : List[str] =len(lowercase )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
A : Union[str, Any] =36
A : Dict =top_down_cut_rod(lowercase, lowercase )
A : Tuple =bottom_up_cut_rod(lowercase, lowercase )
A : Union[str, Any] =naive_cut_rod_recursive(lowercase, lowercase )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
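# A compact restatement of the bottom-up recurrence implemented above:
# max_rev[i] = max over j in 1..i of (prices[j - 1] + max_rev[i - j]).
# The toy prices are the classic CLRS example, not the list used above.
def bottom_up_cut_rod_sketch(n, prices):
    max_rev = [0] * (n + 1)
    for i in range(1, n + 1):
        max_rev[i] = max(prices[j - 1] + max_rev[i - j] for j in range(1, i + 1))
    return max_rev[n]

assert bottom_up_cut_rod_sketch(4, [1, 5, 8, 9]) == 10  # two pieces of length 2: 5 + 5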
| 702 |
import math
def A__ ( lowercase: int ) -> list:
A : Optional[Any] =[True] * n
A : Tuple =False
A : List[Any] =False
A : Dict =True
for i in range(3, int(n**0.5 + 1 ), 2 ):
A : Dict =i * 2
while index < n:
A : Dict =False
A : Dict =index + i
A : Tuple =[2]
for i in range(3, lowercase, 2 ):
if is_prime[i]:
primes.append(lowercase )
return primes
def A__ ( lowercase: int = 999_966_663_333 ) -> int:
A : Optional[int] =math.floor(math.sqrt(lowercase ) ) + 100
A : Optional[int] =prime_sieve(lowercase )
A : Optional[Any] =0
A : List[Any] =0
A : Union[str, Any] =primes[prime_index]
while (last_prime**2) <= limit:
A : Tuple =primes[prime_index + 1]
A : Optional[int] =last_prime**2
A : Tuple =next_prime**2
# Get numbers divisible by lps(current)
A : int =lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
A : List[Any] =upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
A : Any =0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
A : List[str] =next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
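# The sieve above seeds the result with 2 and then only inspects odd numbers.
# A slightly tightened variant of the same idea: start crossing out at i * i
# and step by 2 * i, so only odd composites are ever touched.
def prime_sieve_sketch(n):
    is_prime = [True] * n
    is_prime[0] = is_prime[1] = False
    for i in range(3, int(n**0.5) + 1, 2):
        if is_prime[i]:
            for multiple in range(i * i, n, 2 * i):
                is_prime[multiple] = False
    return [2] + [i for i in range(3, n, 2) if is_prime[i]]

assert prime_sieve_sketch(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]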
| 661 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase : Union[str, Any] =logging.get_logger(__name__)
def A__ ( lowercase: List[Any], lowercase: Tuple=False ) -> str:
A : Union[str, Any] =[]
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
A : List[Any] =[(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def A__ ( lowercase: Optional[int], lowercase: Optional[int], lowercase: Union[str, Any]=False ) -> Optional[int]:
for i in range(config.num_hidden_layers ):
if base_model:
A : Optional[Any] =''
else:
A : str ='vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A : Dict =state_dict.pop(F'blocks.{i}.attn.qkv.weight' )
A : str =state_dict.pop(F'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
A : Optional[Any] =in_proj_weight[
: config.hidden_size, :
]
A : str =in_proj_bias[: config.hidden_size]
A : List[str] =in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A : Dict =in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A : Dict =in_proj_weight[
-config.hidden_size :, :
]
A : Any =in_proj_bias[-config.hidden_size :]
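# What read_in_q_k_v does, in plain terms: timm stores the attention input
# projection as one fused qkv matrix of shape (3 * hidden, hidden), while the
# HF ViT expects three separate (hidden, hidden) matrices. A toy-sized sketch
# of the split:
import torch

hidden = 4
in_proj_weight = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
q_w = in_proj_weight[:hidden, :]
k_w = in_proj_weight[hidden : 2 * hidden, :]
v_w = in_proj_weight[-hidden:, :]
assert torch.equal(torch.cat([q_w, k_w, v_w]), in_proj_weight)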
def A__ ( lowercase: Tuple ) -> Tuple:
A : List[str] =['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(lowercase, lowercase )
def A__ ( lowercase: Optional[int], lowercase: Optional[Any], lowercase: str ) -> Any:
A : str =dct.pop(lowercase )
A : Tuple =val
def A__ ( ) -> Optional[Any]:
A : int ='http://images.cocodataset.org/val2017/000000039769.jpg'
A : List[str] =Image.open(requests.get(lowercase, stream=lowercase ).raw )
return im
@torch.no_grad()
def A__ ( lowercase: str, lowercase: Union[str, Any], lowercase: Optional[int]=True ) -> Dict:
A : Any =ViTConfig()
# patch_size
if model_name[-1] == "8":
A : List[str] =8
# set labels if required
if not base_model:
A : Optional[int] =1_000
A : Optional[Any] ='huggingface/label-files'
A : Dict ='imagenet-1k-id2label.json'
A : int =json.load(open(hf_hub_download(lowercase, lowercase, repo_type='dataset' ), 'r' ) )
A : List[str] ={int(lowercase ): v for k, v in idalabel.items()}
A : Optional[int] =idalabel
A : Union[str, Any] ={v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
A : str =384
A : List[Any] =1_536
A : Tuple =12
A : List[str] =6
# load original model from torch hub
A : List[str] =torch.hub.load('facebookresearch/dino:main', lowercase )
original_model.eval()
# load state_dict of original model, remove and rename some keys
A : int =original_model.state_dict()
if base_model:
remove_classification_head_(lowercase )
A : Dict =create_rename_keys(lowercase, base_model=lowercase )
for src, dest in rename_keys:
rename_key(lowercase, lowercase, lowercase )
read_in_q_k_v(lowercase, lowercase, lowercase )
# load HuggingFace model
if base_model:
A : List[str] =ViTModel(lowercase, add_pooling_layer=lowercase ).eval()
else:
A : Dict =ViTForImageClassification(lowercase ).eval()
model.load_state_dict(lowercase )
# Check outputs on an image, prepared by ViTImageProcessor
A : Optional[Any] =ViTImageProcessor()
A : str =image_processor(images=prepare_img(), return_tensors='pt' )
A : Tuple =encoding['pixel_values']
A : Optional[Any] =model(lowercase )
if base_model:
A : Dict =original_model(lowercase )
assert torch.allclose(lowercase, outputs.last_hidden_state[:, 0, :], atol=1e-1 )
else:
A : int =original_model(lowercase )
assert logits.shape == outputs.logits.shape
assert torch.allclose(lowercase, outputs.logits, atol=1e-3 )
Path(lowercase ).mkdir(exist_ok=lowercase )
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowercase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowercase )
if __name__ == "__main__":
_lowercase : Union[str, Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''dino_vitb16''',
type=str,
help='''Name of the model trained with DINO you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--base_model''',
action='''store_true''',
help='''Whether to only convert the base model (no projection head weights).''',
)
parser.set_defaults(base_model=True)
_lowercase : List[Any] =parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 703 |
import heapq
def A__ ( lowercase: dict ) -> set[int]:
A : list[list] =[]
# for each node and his adjacency list add them and the rank of the node to queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(lowercase, [-1 * len(lowercase ), (key, value)] )
# chosen_vertices = set of chosen vertices
A : Dict =set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
A : List[str] =heapq.heappop(lowercase )[1][0]
chosen_vertices.add(lowercase )
# Remove all arcs adjacent to argmax
for elem in queue:
# if v haven't adjacent node, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
A : str =elem[1][1].index(lowercase )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(lowercase )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowercase : List[Any] ={0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
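# The heap above emulates a max-priority queue by pushing negated degrees. A
# compact greedy variant without in-place arc deletion (tie-breaking, and
# thus the exact cover returned, may differ from the heap version):
def greedy_min_vertex_cover_sketch(graph):
    edges = {frozenset((u, v)) for u, adj in graph.items() for v in adj}
    cover = set()
    while edges:
        # Pick the vertex touching the most remaining edges.
        best = max(graph, key=lambda u: sum(u in e for e in edges))
        cover.add(best)
        edges = {e for e in edges if best not in e}
    return cover

print(greedy_min_vertex_cover_sketch({0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}))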
| 661 | 0 |
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
lowercase : Optional[int] = DDIMPipeline
lowercase : int = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
lowercase : Optional[Any] = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"latents",
"callback",
"callback_steps",
}
lowercase : Optional[Any] = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
lowercase : Union[str, Any] = False
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Optional[int]:
torch.manual_seed(0 )
A : str =UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
A : Optional[int] =DDIMScheduler()
A : Optional[Any] ={'unet': unet, 'scheduler': scheduler}
return components
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any]=0 ) -> Any:
if str(SCREAMING_SNAKE_CASE__ ).startswith('mps' ):
A : List[Any] =torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
A : Union[str, Any] =torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
A : Optional[int] ={
'batch_size': 1,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> List[Any]:
A : Union[str, Any] ='cpu'
A : Tuple =self.get_dummy_components()
A : Union[str, Any] =self.pipeline_class(**SCREAMING_SNAKE_CASE__ )
pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
A : str =self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
A : str =pipe(**SCREAMING_SNAKE_CASE__ ).images
A : Optional[Any] =image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
A : Optional[Any] =np.array(
[1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04] )
A : str =np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(SCREAMING_SNAKE_CASE__ , 1e-3 )
def SCREAMING_SNAKE_CASE_ ( self : int ) -> Dict:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> List[Any]:
super().test_save_load_local(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Tuple:
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Tuple:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Dict:
A : Any ='google/ddpm-cifar10-32'
A : Optional[int] =UNetaDModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : Tuple =DDIMScheduler()
A : int =DDIMPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
ddim.to(SCREAMING_SNAKE_CASE__ )
ddim.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
A : Dict =torch.manual_seed(0 )
A : Optional[Any] =ddim(generator=SCREAMING_SNAKE_CASE__ , eta=0.0 , output_type='numpy' ).images
A : str =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A : Tuple =np.array([0.1_7_2_3, 0.1_6_1_7, 0.1_6_0_0, 0.1_6_2_6, 0.1_4_9_7, 0.1_5_1_3, 0.1_5_0_5, 0.1_4_4_2, 0.1_4_5_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> int:
A : Optional[int] ='google/ddpm-ema-bedroom-256'
A : str =UNetaDModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : str =DDIMScheduler.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : Tuple =DDIMPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
ddpm.to(SCREAMING_SNAKE_CASE__ )
ddpm.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
A : Any =torch.manual_seed(0 )
A : Optional[int] =ddpm(generator=SCREAMING_SNAKE_CASE__ , output_type='numpy' ).images
A : List[Any] =image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
A : Optional[int] =np.array([0.0_0_6_0, 0.0_2_0_1, 0.0_3_4_4, 0.0_0_2_4, 0.0_0_1_8, 0.0_0_0_2, 0.0_0_2_2, 0.0_0_0_0, 0.0_0_6_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
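# Why the tests above thread a seeded torch.Generator through the pipeline:
# it makes sampling reproducible without touching the global RNG state.
import torch

g_a = torch.Generator().manual_seed(0)
g_b = torch.Generator().manual_seed(0)
assert torch.equal(torch.randn(3, generator=g_a), torch.randn(3, generator=g_b))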
| 704 |
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
_lowercase : List[Any] =logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : float , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> int:
A : Tuple =feature_size
A : int =sampling_rate
A : List[str] =padding_value
A : Tuple =kwargs.pop('padding_side' , 'right' )
A : str =kwargs.pop('return_attention_mask' , SCREAMING_SNAKE_CASE__ )
super().__init__(**SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : str , SCREAMING_SNAKE_CASE__ : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , SCREAMING_SNAKE_CASE__ : Union[bool, str, PaddingStrategy] = True , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[str, TensorType]] = None , ) -> BatchFeature:
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(SCREAMING_SNAKE_CASE__ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
A : Tuple ={
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
'You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'
f' to this method that includes {self.model_input_names[0]}, but you provided'
f' {list(processed_features.keys() )}' )
A : Dict =processed_features[self.model_input_names[0]]
A : int =(
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(SCREAMING_SNAKE_CASE__ ) == 0:
if return_attention_mask:
A : List[Any] =[]
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
A : List[str] =required_input[0]
if isinstance(SCREAMING_SNAKE_CASE__ , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
A : Any =0
while len(required_input[index] ) == 0:
index += 1
if index < len(SCREAMING_SNAKE_CASE__ ):
A : Dict =required_input[index][0]
if return_tensors is None:
if is_tf_tensor(SCREAMING_SNAKE_CASE__ ):
A : List[Any] ='tf'
elif is_torch_tensor(SCREAMING_SNAKE_CASE__ ):
A : Optional[int] ='pt'
elif isinstance(SCREAMING_SNAKE_CASE__ , (int, float, list, tuple, np.ndarray) ):
A : Union[str, Any] ='np'
else:
raise ValueError(
f'type of {first_element} unknown: {type(SCREAMING_SNAKE_CASE__ )}. '
'Should be one of a python, numpy, pytorch or tensorflow object.' )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
A : int =to_numpy(SCREAMING_SNAKE_CASE__ )
else:
A : List[Any] =[to_numpy(SCREAMING_SNAKE_CASE__ ) for v in value]
# Convert padding_strategy in PaddingStrategy
A : List[Any] =self._get_padding_strategies(padding=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ )
A : Optional[int] =processed_features[self.model_input_names[0]]
A : List[str] =len(SCREAMING_SNAKE_CASE__ )
if not all(len(SCREAMING_SNAKE_CASE__ ) == batch_size for v in processed_features.values() ):
raise ValueError('Some items in the output dictionary have a different batch size than others.' )
A : Tuple =[]
for i in range(SCREAMING_SNAKE_CASE__ ):
A : int ={k: v[i] for k, v in processed_features.items()}
# truncation
A : List[Any] =self._truncate(
SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , pad_to_multiple_of=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , )
truncated_inputs.append(SCREAMING_SNAKE_CASE__ )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
A : Any =max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
A : Optional[Any] =PaddingStrategy.MAX_LENGTH
A : List[Any] ={}
for i in range(SCREAMING_SNAKE_CASE__ ):
# padding
A : Optional[Any] =self._pad(
truncated_inputs[i] , max_length=SCREAMING_SNAKE_CASE__ , padding_strategy=SCREAMING_SNAKE_CASE__ , pad_to_multiple_of=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , )
for key, value in outputs.items():
if key not in batch_outputs:
A : Dict =[]
if value.dtype is np.dtype(np.floataa ):
A : Tuple =value.astype(np.floataa )
batch_outputs[key].append(SCREAMING_SNAKE_CASE__ )
return BatchFeature(SCREAMING_SNAKE_CASE__ , tensor_type=SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : Union[Dict[str, np.ndarray], BatchFeature] , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , ) -> dict:
A : Optional[int] =processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
A : List[str] =len(SCREAMING_SNAKE_CASE__ )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
A : Tuple =((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
A : int =padding_strategy != PaddingStrategy.DO_NOT_PAD and len(SCREAMING_SNAKE_CASE__ ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
A : str =np.ones(len(SCREAMING_SNAKE_CASE__ ) , dtype=np.intaa )
if needs_to_be_padded:
A : Union[str, Any] =max_length - len(SCREAMING_SNAKE_CASE__ )
if self.padding_side == "right":
if return_attention_mask:
A : Dict =np.pad(
processed_features['attention_mask'] , (0, difference) )
A : str =((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
A : Tuple =np.pad(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'constant' , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
A : List[Any] =np.pad(
processed_features['attention_mask'] , (difference, 0) )
A : Union[str, Any] =((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
A : Tuple =np.pad(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'constant' , constant_values=self.padding_value )
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return processed_features
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : Union[Dict[str, np.ndarray], BatchFeature] , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , ) -> Optional[Any]:
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError('When setting ``truncation=True``, make sure that ``max_length`` is defined.' )
A : Tuple =processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
A : Any =((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
A : List[str] =len(SCREAMING_SNAKE_CASE__ ) > max_length
if needs_to_be_truncated:
A : Union[str, Any] =processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
A : Dict =processed_features['attention_mask'][:max_length]
return processed_features
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any=False , SCREAMING_SNAKE_CASE__ : Dict=None ) -> Union[str, Any]:
# Get padding strategy
if padding is not False:
if padding is True:
A : List[Any] =PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
A : Tuple =PaddingStrategy(SCREAMING_SNAKE_CASE__ )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
A : Optional[int] =padding
else:
A : List[str] =PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f'When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined' )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
'Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'
' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.' )
return padding_strategy
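# The right-padding branch of _pad above, in isolation: pad 1-D features to a
# common length and grow an attention mask alongside. Minimal NumPy sketch:
import numpy as np

def pad_batch_sketch(batch, padding_value=0.0):
    max_len = max(len(x) for x in batch)
    padded, masks = [], []
    for x in batch:
        diff = max_len - len(x)
        padded.append(np.pad(np.asarray(x, dtype=np.float32), (0, diff), constant_values=padding_value))
        masks.append(np.pad(np.ones(len(x), dtype=np.int32), (0, diff)))
    return np.stack(padded), np.stack(masks)

values, mask = pad_batch_sketch([[0.1, 0.2, 0.3], [0.4]])
print(values.shape, mask.tolist())  # (2, 3) [[1, 1, 1], [1, 0, 0]]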
| 661 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_lowercase : Union[str, Any] ={}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : int =['''NllbTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[Any] =['''NllbTokenizerFast''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
_lowercase : Dict =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 705 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
_lowercase : Optional[int] =logging.get_logger(__name__)
_lowercase : List[str] ={
'''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'''
),
'''microsoft/deberta-v2-xxlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
lowercase : int = "deberta-v2"
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : str=12_81_00 , SCREAMING_SNAKE_CASE__ : List[Any]=15_36 , SCREAMING_SNAKE_CASE__ : Dict=24 , SCREAMING_SNAKE_CASE__ : List[str]=24 , SCREAMING_SNAKE_CASE__ : List[str]=61_44 , SCREAMING_SNAKE_CASE__ : List[Any]="gelu" , SCREAMING_SNAKE_CASE__ : int=0.1 , SCREAMING_SNAKE_CASE__ : Any=0.1 , SCREAMING_SNAKE_CASE__ : List[str]=5_12 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0 , SCREAMING_SNAKE_CASE__ : Tuple=0.0_2 , SCREAMING_SNAKE_CASE__ : List[Any]=1e-7 , SCREAMING_SNAKE_CASE__ : Optional[int]=False , SCREAMING_SNAKE_CASE__ : Tuple=-1 , SCREAMING_SNAKE_CASE__ : List[Any]=0 , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : List[str]=0 , SCREAMING_SNAKE_CASE__ : List[str]="gelu" , **SCREAMING_SNAKE_CASE__ : Dict , ) -> Dict:
super().__init__(**SCREAMING_SNAKE_CASE__ )
A : Dict =hidden_size
A : Optional[Any] =num_hidden_layers
A : Optional[int] =num_attention_heads
A : Optional[int] =intermediate_size
A : Any =hidden_act
A : Any =hidden_dropout_prob
A : Union[str, Any] =attention_probs_dropout_prob
A : Optional[Any] =max_position_embeddings
A : Tuple =type_vocab_size
A : Tuple =initializer_range
A : int =relative_attention
A : int =max_relative_positions
A : Optional[Any] =pad_token_id
A : Union[str, Any] =position_biased_input
# Backwards compatibility
if type(SCREAMING_SNAKE_CASE__ ) == str:
A : Any =[x.strip() for x in pos_att_type.lower().split('|' )]
A : Any =pos_att_type
A : Tuple =vocab_size
A : Any =layer_norm_eps
A : Optional[Any] =kwargs.get('pooler_hidden_size' , SCREAMING_SNAKE_CASE__ )
A : str =pooler_dropout
A : Any =pooler_hidden_act
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
@property
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
A : List[Any] ={0: 'batch', 1: 'choice', 2: 'sequence'}
else:
A : int ={0: 'batch', 1: 'sequence'}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)] )
else:
return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)] )
@property
def SCREAMING_SNAKE_CASE_ ( self : int ) -> int:
return 12
def SCREAMING_SNAKE_CASE_ ( self : str , SCREAMING_SNAKE_CASE__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : Optional["TensorType"] = None , SCREAMING_SNAKE_CASE__ : int = 3 , SCREAMING_SNAKE_CASE__ : int = 40 , SCREAMING_SNAKE_CASE__ : int = 40 , SCREAMING_SNAKE_CASE__ : "PreTrainedTokenizerBase" = None , ) -> Mapping[str, Any]:
A : str =super().generate_dummy_inputs(preprocessor=SCREAMING_SNAKE_CASE__ , framework=SCREAMING_SNAKE_CASE__ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
| 661 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
_lowercase : Tuple =logging.get_logger(__name__)
_lowercase : Optional[int] ={'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_lowercase : Any ={
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
_lowercase : Dict ={
'''distilbert-base-uncased''': 5_1_2,
'''distilbert-base-uncased-distilled-squad''': 5_1_2,
'''distilbert-base-cased''': 5_1_2,
'''distilbert-base-cased-distilled-squad''': 5_1_2,
'''distilbert-base-german-cased''': 5_1_2,
'''distilbert-base-multilingual-cased''': 5_1_2,
}
_lowercase : Dict ={
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
lowercase : Any = VOCAB_FILES_NAMES
lowercase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
lowercase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : int = PRETRAINED_INIT_CONFIGURATION
lowercase : Optional[int] = ["input_ids", "attention_mask"]
lowercase : List[str] = DistilBertTokenizer
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : Optional[int]="[UNK]" , SCREAMING_SNAKE_CASE__ : str="[SEP]" , SCREAMING_SNAKE_CASE__ : Optional[int]="[PAD]" , SCREAMING_SNAKE_CASE__ : str="[CLS]" , SCREAMING_SNAKE_CASE__ : Tuple="[MASK]" , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : List[str]=None , **SCREAMING_SNAKE_CASE__ : Tuple , ) -> List[Any]:
super().__init__(
SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
A : List[Any] =json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , SCREAMING_SNAKE_CASE__ ) != do_lower_case
or normalizer_state.get('strip_accents' , SCREAMING_SNAKE_CASE__ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , SCREAMING_SNAKE_CASE__ ) != tokenize_chinese_chars
):
A : Dict =getattr(SCREAMING_SNAKE_CASE__ , normalizer_state.pop('type' ) )
A : Dict =do_lower_case
A : Dict =strip_accents
A : Optional[Any] =tokenize_chinese_chars
A : Any =normalizer_class(**SCREAMING_SNAKE_CASE__ )
A : int =do_lower_case
def SCREAMING_SNAKE_CASE_ ( self : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None ) -> Union[str, Any]:
A : List[str] =[self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ) -> List[int]:
A : List[Any] =[self.sep_token_id]
A : Dict =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def SCREAMING_SNAKE_CASE_ ( self : List[str] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ) -> Tuple[str]:
A : Union[str, Any] =self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ )
return tuple(SCREAMING_SNAKE_CASE__ )
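# The two methods above implement the BERT-style special-token layout:
# single sequence:  [CLS] A [SEP]          token_type_ids all 0
# sequence pair:    [CLS] A [SEP] B [SEP]  0s over the first segment, 1s over the second
# Toy illustration with made-up ids (cls=101, sep=102):
cls_id, sep_id = 101, 102
a, b = [7, 8], [9]
input_ids = [cls_id] + a + [sep_id] + b + [sep_id]
token_type_ids = [0] * (len(a) + 2) + [1] * (len(b) + 1)
assert len(input_ids) == len(token_type_ids) == 6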
| 706 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
lowercase : Tuple = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]
@register_to_config
def __init__( self : Any , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : int = 5_02_57 , SCREAMING_SNAKE_CASE__ : int = 10_24 , SCREAMING_SNAKE_CASE__ : int = 7_68 , SCREAMING_SNAKE_CASE__ : int = 12 , SCREAMING_SNAKE_CASE__ : int = 12 , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : str = "gelu_new" , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 1e-5 , SCREAMING_SNAKE_CASE__ : float = 0.0_2 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , ) -> List[str]:
super().__init__()
A : str =prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f'`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and'
f' `n_embd`: {n_embd} are not equal.' )
A : List[Any] =prefix_inner_dim
A : Dict =prefix_hidden_dim
A : List[str] =(
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
A : Optional[int] =(
nn.Linear(self.prefix_hidden_dim , SCREAMING_SNAKE_CASE__ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
A : Dict =GPTaConfig(
vocab_size=SCREAMING_SNAKE_CASE__ , n_positions=SCREAMING_SNAKE_CASE__ , n_embd=SCREAMING_SNAKE_CASE__ , n_layer=SCREAMING_SNAKE_CASE__ , n_head=SCREAMING_SNAKE_CASE__ , n_inner=SCREAMING_SNAKE_CASE__ , activation_function=SCREAMING_SNAKE_CASE__ , resid_pdrop=SCREAMING_SNAKE_CASE__ , embd_pdrop=SCREAMING_SNAKE_CASE__ , attn_pdrop=SCREAMING_SNAKE_CASE__ , layer_norm_epsilon=SCREAMING_SNAKE_CASE__ , initializer_range=SCREAMING_SNAKE_CASE__ , scale_attn_weights=SCREAMING_SNAKE_CASE__ , use_cache=SCREAMING_SNAKE_CASE__ , scale_attn_by_inverse_layer_idx=SCREAMING_SNAKE_CASE__ , reorder_and_upcast_attn=SCREAMING_SNAKE_CASE__ , )
A : Dict =GPTaLMHeadModel(SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE__ : torch.Tensor , SCREAMING_SNAKE_CASE__ : torch.Tensor , SCREAMING_SNAKE_CASE__ : Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE__ : Optional[torch.Tensor] = None , ) -> Optional[Any]:
A : str =self.transformer.transformer.wte(SCREAMING_SNAKE_CASE__ )
A : Any =self.encode_prefix(SCREAMING_SNAKE_CASE__ )
A : Optional[Any] =self.decode_prefix(SCREAMING_SNAKE_CASE__ )
A : Optional[int] =torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
A : int =self.get_dummy_token(input_ids.shape[0] , input_ids.device )
A : Optional[int] =torch.cat((dummy_token, input_ids) , dim=1 )
A : Dict =self.transformer(inputs_embeds=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def SCREAMING_SNAKE_CASE_ ( self : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : torch.device ) -> torch.Tensor:
return torch.zeros(SCREAMING_SNAKE_CASE__ , self.prefix_length , dtype=torch.intaa , device=SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[str]:
return self.encode_prefix(SCREAMING_SNAKE_CASE__ )
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any ) -> Dict:
A : Dict =torch.split(SCREAMING_SNAKE_CASE__ , 1 , dim=0 )
A : int =[]
A : Optional[int] =[]
for feature in features:
A : int =self.decode_prefix(feature.to(SCREAMING_SNAKE_CASE__ ) ) # back to the clip feature
# Only support beam search for now
A , A : Dict =self.generate_beam(
input_embeds=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
A : str =torch.stack(SCREAMING_SNAKE_CASE__ )
A : int =torch.stack(SCREAMING_SNAKE_CASE__ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : int = 5 , SCREAMING_SNAKE_CASE__ : int = 67 , SCREAMING_SNAKE_CASE__ : float = 1.0 , SCREAMING_SNAKE_CASE__ : Optional[int] = None , ) -> Dict:
A : Dict =eos_token_id
A : str =None
A : List[Any] =None
A : List[Any] =torch.ones(SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ , dtype=torch.int )
A : str =torch.zeros(SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ , dtype=torch.bool )
if input_embeds is not None:
A : Any =input_embeds
else:
A : List[Any] =self.transformer.transformer.wte(SCREAMING_SNAKE_CASE__ )
for i in range(SCREAMING_SNAKE_CASE__ ):
A : Any =self.transformer(inputs_embeds=SCREAMING_SNAKE_CASE__ )
A : str =outputs.logits
A : Union[str, Any] =logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
A : List[str] =logits.softmax(-1 ).log()
if scores is None:
A , A : Any =logits.topk(SCREAMING_SNAKE_CASE__ , -1 )
A : Any =generated.expand(SCREAMING_SNAKE_CASE__ , *generated.shape[1:] )
A , A : Tuple =next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
A : Union[str, Any] =next_tokens
else:
A : str =tokens.expand(SCREAMING_SNAKE_CASE__ , *tokens.shape[1:] )
A : Optional[int] =torch.cat((tokens, next_tokens) , dim=1 )
else:
A : Optional[Any] =-float(np.inf )
A : Tuple =0
A : Optional[Any] =scores[:, None] + logits
seq_lengths[~is_stopped] += 1
A : int =scores_sum / seq_lengths[:, None]
A , A : Optional[int] =scores_sum_average.view(-1 ).topk(SCREAMING_SNAKE_CASE__ , -1 )
A : Dict =next_tokens // scores_sum.shape[1]
A : Optional[Any] =seq_lengths[next_tokens_source]
A : Tuple =next_tokens % scores_sum.shape[1]
A : Optional[Any] =next_tokens.unsqueeze(1 )
A : Optional[Any] =tokens[next_tokens_source]
A : Any =torch.cat((tokens, next_tokens) , dim=1 )
A : List[str] =generated[next_tokens_source]
A : List[Any] =scores_sum_average * seq_lengths
A : Optional[Any] =is_stopped[next_tokens_source]
A : Optional[int] =self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
A : Any =torch.cat((generated, next_token_embed) , dim=1 )
A : Optional[int] =is_stopped + next_tokens.eq(SCREAMING_SNAKE_CASE__ ).squeeze()
if is_stopped.all():
break
A : Optional[Any] =scores / seq_lengths
A : str =scores.argsort(descending=SCREAMING_SNAKE_CASE__ )
# tokens tensors are already padded to max_seq_length
A : Optional[Any] =[tokens[i] for i in order]
A : Any =torch.stack(SCREAMING_SNAKE_CASE__ , dim=0 )
A : str =torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
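# The core move in generate_beam above: keep a cumulative log-prob per beam,
# add the next-token log-probs, and take a global top-k over the flattened
# (beam, vocab) grid; integer division then recovers the source beam. Toy
# sketch with made-up sizes:
import torch

beam_size, vocab = 2, 5
scores = torch.tensor([[-0.1], [-0.7]])    # cumulative log-prob per beam
next_logprobs = torch.randn(beam_size, vocab).log_softmax(-1)
total = (scores + next_logprobs).view(-1)  # shape (beam_size * vocab,)
top_scores, flat_idx = total.topk(beam_size)
source_beam = flat_idx // vocab            # which beam each winner extends
next_token = flat_idx % vocab
print(source_beam.tolist(), next_token.tolist())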
| 661 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_lowercase : Union[str, Any] =logging.get_logger(__name__)
_lowercase : Optional[int] ={
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
lowercase : Tuple = "deformable_detr"
lowercase : Optional[int] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=3 , SCREAMING_SNAKE_CASE__ : int=3_00 , SCREAMING_SNAKE_CASE__ : int=10_24 , SCREAMING_SNAKE_CASE__ : List[str]=6 , SCREAMING_SNAKE_CASE__ : Tuple=10_24 , SCREAMING_SNAKE_CASE__ : Any=8 , SCREAMING_SNAKE_CASE__ : Any=6 , SCREAMING_SNAKE_CASE__ : int=10_24 , SCREAMING_SNAKE_CASE__ : List[Any]=8 , SCREAMING_SNAKE_CASE__ : int=0.0 , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Dict="relu" , SCREAMING_SNAKE_CASE__ : Any=2_56 , SCREAMING_SNAKE_CASE__ : List[str]=0.1 , SCREAMING_SNAKE_CASE__ : str=0.0 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.0 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.0_2 , SCREAMING_SNAKE_CASE__ : Tuple=1.0 , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : int=False , SCREAMING_SNAKE_CASE__ : Optional[int]="sine" , SCREAMING_SNAKE_CASE__ : Tuple="resnet50" , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Any=False , SCREAMING_SNAKE_CASE__ : int=4 , SCREAMING_SNAKE_CASE__ : Any=4 , SCREAMING_SNAKE_CASE__ : Optional[Any]=4 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False , SCREAMING_SNAKE_CASE__ : int=3_00 , SCREAMING_SNAKE_CASE__ : Dict=False , SCREAMING_SNAKE_CASE__ : List[Any]=1 , SCREAMING_SNAKE_CASE__ : List[str]=5 , SCREAMING_SNAKE_CASE__ : Optional[int]=2 , SCREAMING_SNAKE_CASE__ : str=1 , SCREAMING_SNAKE_CASE__ : Dict=1 , SCREAMING_SNAKE_CASE__ : List[Any]=5 , SCREAMING_SNAKE_CASE__ : Optional[int]=2 , SCREAMING_SNAKE_CASE__ : List[str]=0.1 , SCREAMING_SNAKE_CASE__ : str=0.2_5 , SCREAMING_SNAKE_CASE__ : Optional[int]=False , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> Optional[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
A : Optional[int] =CONFIG_MAPPING['resnet'](out_features=['stage4'] )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
A : int =backbone_config.get('model_type' )
A : Optional[Any] =CONFIG_MAPPING[backbone_model_type]
A : Dict =config_class.from_dict(SCREAMING_SNAKE_CASE__ )
A : str =use_timm_backbone
A : Union[str, Any] =backbone_config
A : Dict =num_channels
A : Dict =num_queries
A : List[Any] =max_position_embeddings
A : Optional[int] =d_model
A : Optional[Any] =encoder_ffn_dim
A : Tuple =encoder_layers
A : Union[str, Any] =encoder_attention_heads
A : Optional[int] =decoder_ffn_dim
A : Union[str, Any] =decoder_layers
A : Tuple =decoder_attention_heads
A : Optional[Any] =dropout
A : Dict =attention_dropout
A : Dict =activation_dropout
A : Any =activation_function
A : str =init_std
A : Any =init_xavier_std
A : Optional[Any] =encoder_layerdrop
A : Union[str, Any] =auxiliary_loss
A : List[str] =position_embedding_type
A : List[str] =backbone
A : int =use_pretrained_backbone
A : int =dilation
# deformable attributes
A : List[Any] =num_feature_levels
A : Optional[Any] =encoder_n_points
A : List[str] =decoder_n_points
A : Optional[Any] =two_stage
A : List[str] =two_stage_num_proposals
A : Optional[int] =with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('If two_stage is True, with_box_refine must be True.' )
# Hungarian matcher
A : Optional[Any] =class_cost
A : Tuple =bbox_cost
A : Optional[Any] =giou_cost
# Loss coefficients
A : Union[str, Any] =mask_loss_coefficient
A : Dict =dice_loss_coefficient
A : List[str] =bbox_loss_coefficient
A : Optional[int] =giou_loss_coefficient
A : Any =eos_coefficient
A : Any =focal_alpha
A : Tuple =disable_custom_kernels
super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@property
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> int:
return self.encoder_attention_heads
@property
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> int:
return self.d_model
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Optional[int]:
A : List[Any] =copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
A : str =self.backbone_config.to_dict()
A : Dict =self.__class__.model_type
return output
| 707 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowercase : Optional[int] =get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
lowercase : List[str] = XLMRobertaTokenizer
lowercase : Dict = XLMRobertaTokenizerFast
lowercase : str = True
lowercase : Tuple = True
def SCREAMING_SNAKE_CASE_ ( self : int ) -> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
A : List[str] =XLMRobertaTokenizer(SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : int ) -> List[Any]:
A : List[str] ='<pad>'
A : int =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Any:
A : List[str] =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , 10_02 )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Dict:
self.assertEqual(self.get_tokenizer().vocab_size , 10_02 )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> str:
A : Union[str, Any] =XLMRobertaTokenizer(SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ )
A : str =tokenizer.tokenize('This is a test' )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
A : Any =tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
A : Tuple =tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
A : Union[str, Any] =tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Optional[int]:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
A : Any =(self.rust_tokenizer_class, 'hf-internal-testing/tiny-xlm-roberta', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
A : List[Any] =self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
A : Dict =self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
A : str =tempfile.mkdtemp()
A : Optional[int] =tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE__ )
A : Optional[Any] =tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
A : List[str] =tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Checks everything loads correctly in the same way
A : Tuple =tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : Dict =tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(SCREAMING_SNAKE_CASE__ )
# Save tokenizer rust, legacy_format=True
A : Optional[int] =tempfile.mkdtemp()
A : Optional[int] =tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE__ , legacy_format=SCREAMING_SNAKE_CASE__ )
A : List[Any] =tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE__ )
# Checks it save with the same files
self.assertSequenceEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Checks everything loads correctly in the same way
A : Tuple =tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : Tuple =tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
shutil.rmtree(SCREAMING_SNAKE_CASE__ )
# Save tokenizer rust, legacy_format=False
A : List[Any] =tempfile.mkdtemp()
A : Optional[int] =tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE__ , legacy_format=SCREAMING_SNAKE_CASE__ )
A : str =tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
A : List[Any] =tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : List[Any] =tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
shutil.rmtree(SCREAMING_SNAKE_CASE__ )
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Optional[int]:
return XLMRobertaTokenizer.from_pretrained('xlm-roberta-base' )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Any:
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(SCREAMING_SNAKE_CASE__ , f.name )
A : Optional[Any] =XLMRobertaTokenizer(f.name , keep_accents=SCREAMING_SNAKE_CASE__ )
A : int =pickle.dumps(SCREAMING_SNAKE_CASE__ )
pickle.loads(SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Union[str, Any]:
if not self.test_rust_tokenizer:
return
A : Union[str, Any] =self.get_tokenizer()
A : int =self.get_rust_tokenizer()
A : List[str] ='I was born in 92000, and this is falsé.'
A : Union[str, Any] =tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
A : Optional[int] =rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A : Any =tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
A : Tuple =rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A : Optional[Any] =self.get_rust_tokenizer()
A : int =tokenizer.encode(SCREAMING_SNAKE_CASE__ )
A : Dict =rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> List[str]:
A : Any ='Hello World!'
A : Optional[Any] =[0, 3_53_78, 66_61, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(SCREAMING_SNAKE_CASE__ , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE__ ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> str:
A : Any =(
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
A : int =[
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(SCREAMING_SNAKE_CASE__ , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE__ ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Any:
# fmt: off
A : List[Any] ={'input_ids': [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE__ , model_name='xlm-roberta-base' , revision='d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3' , )
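# Illustrative sketch, not part of the original file: the round trip these
# tests exercise, using the same SentencePiece fixture (``SAMPLE_VOCAB`` is
# defined at the top of this file).
#
#     tok = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
#     pieces = tok.tokenize("This is a test")          # ['▁This', '▁is', ...]
#     ids = tok.convert_tokens_to_ids(pieces)          # fairseq-offset ids
#     assert tok.convert_ids_to_tokens(ids) == pieces  # no <unk> in this input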
| 661 | 0 |
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
_lowercase : List[Any] =(
'''This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '''
'''library. You can have a look at this example script for pointers: '''
'''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'''
)
def A__ ( lowercase: str, lowercase: Optional[Any] ) -> Any:
warnings.warn(lowercase, lowercase )
requires_backends(lowercase, 'sklearn' )
return (preds == labels).mean()
def A__ ( lowercase: List[Any], lowercase: Union[str, Any] ) -> Optional[Any]:
warnings.warn(lowercase, lowercase )
requires_backends(lowercase, 'sklearn' )
A : List[str] =simple_accuracy(lowercase, lowercase )
A : Union[str, Any] =fa_score(y_true=lowercase, y_pred=lowercase )
return {
"acc": acc,
"f1": fa,
"acc_and_f1": (acc + fa) / 2,
}
def A__ ( lowercase: Optional[int], lowercase: int ) -> Union[str, Any]:
warnings.warn(lowercase, lowercase )
requires_backends(lowercase, 'sklearn' )
A : str =pearsonr(lowercase, lowercase )[0]
A : Dict =spearmanr(lowercase, lowercase )[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def A__ ( lowercase: Tuple, lowercase: List[Any], lowercase: Dict ) -> List[str]:
warnings.warn(lowercase, lowercase )
requires_backends(lowercase, 'sklearn' )
assert len(lowercase ) == len(lowercase ), F'Predictions and labels have mismatched lengths {len(lowercase )} and {len(lowercase )}'
if task_name == "cola":
return {"mcc": matthews_corrcoef(lowercase, lowercase )}
elif task_name == "sst-2":
return {"acc": simple_accuracy(lowercase, lowercase )}
elif task_name == "mrpc":
return acc_and_fa(lowercase, lowercase )
elif task_name == "sts-b":
return pearson_and_spearman(lowercase, lowercase )
elif task_name == "qqp":
return acc_and_fa(lowercase, lowercase )
elif task_name == "mnli":
return {"mnli/acc": simple_accuracy(lowercase, lowercase )}
elif task_name == "mnli-mm":
return {"mnli-mm/acc": simple_accuracy(lowercase, lowercase )}
elif task_name == "qnli":
return {"acc": simple_accuracy(lowercase, lowercase )}
elif task_name == "rte":
return {"acc": simple_accuracy(lowercase, lowercase )}
elif task_name == "wnli":
return {"acc": simple_accuracy(lowercase, lowercase )}
elif task_name == "hans":
return {"acc": simple_accuracy(lowercase, lowercase )}
else:
raise KeyError(lowercase )
def A__ ( lowercase: int, lowercase: List[str], lowercase: str ) -> List[str]:
warnings.warn(lowercase, lowercase )
requires_backends(lowercase, 'sklearn' )
if len(lowercase ) != len(lowercase ):
raise ValueError(F'Predictions and labels have mismatched lengths {len(lowercase )} and {len(lowercase )}' )
if task_name == "xnli":
return {"acc": simple_accuracy(lowercase, lowercase )}
else:
raise KeyError(lowercase )
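# Illustrative sketch, not part of the original file: calling the GLUE
# dispatcher above (upstream it is named ``glue_compute_metrics``; the name is
# obscured here, so treat it as an assumption). Requires scikit-learn.
#
#     import numpy as np
#     preds, labels = np.array([1, 0, 1]), np.array([1, 1, 1])
#     glue_compute_metrics("sst-2", preds, labels)   # -> {"acc": 0.666...}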
| 708 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : int =logging.get_logger(__name__)
_lowercase : Dict ={
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/config.json''',
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
lowercase : Optional[int] = "xglm"
lowercase : Any = ["past_key_values"]
lowercase : Dict = {
"num_attention_heads": "attention_heads",
"hidden_size": "d_model",
"num_hidden_layers": "num_layers",
}
def __init__( self : int , SCREAMING_SNAKE_CASE__ : List[Any]=25_60_08 , SCREAMING_SNAKE_CASE__ : Dict=20_48 , SCREAMING_SNAKE_CASE__ : List[Any]=10_24 , SCREAMING_SNAKE_CASE__ : str=40_96 , SCREAMING_SNAKE_CASE__ : Optional[int]=24 , SCREAMING_SNAKE_CASE__ : Optional[Any]=16 , SCREAMING_SNAKE_CASE__ : Any="gelu" , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : List[Any]=0.0 , SCREAMING_SNAKE_CASE__ : Tuple=0.0 , SCREAMING_SNAKE_CASE__ : List[Any]=0.0_2 , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Any=2 , SCREAMING_SNAKE_CASE__ : List[Any]=1 , SCREAMING_SNAKE_CASE__ : str=0 , SCREAMING_SNAKE_CASE__ : List[str]=2 , **SCREAMING_SNAKE_CASE__ : Dict , ) -> int:
A : str =vocab_size
A : Union[str, Any] =max_position_embeddings
A : Optional[Any] =d_model
A : Optional[int] =ffn_dim
A : int =num_layers
A : Any =attention_heads
A : Dict =activation_function
A : List[Any] =dropout
A : str =attention_dropout
A : List[Any] =activation_dropout
A : List[Any] =layerdrop
A : List[Any] =init_std
A : Union[str, Any] =scale_embedding # scale factor will be sqrt(d_model) if True
A : List[str] =use_cache
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , decoder_start_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
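# Illustrative sketch, not part of the original file: default construction
# matches facebook/xglm-564M, and the attribute_map above aliases the usual
# names (class name ``XGLMConfig`` assumed from model_type "xglm").
#
#     config = XGLMConfig()
#     config.hidden_size        # -> 1024, resolved to d_model via attribute_map
#     config.num_hidden_layers  # -> 24, resolved to num_layers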
| 661 | 0 |
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE_ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
lowercase : int = ["torch", "transformers", "onnx"]
def __init__( self : Any , *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Any:
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : int , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : str ) -> List[Any]:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Tuple , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : int ) -> Union[str, Any]:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class SCREAMING_SNAKE_CASE_ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
lowercase : Optional[int] = ["torch", "transformers", "onnx"]
def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : str ) -> int:
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Tuple , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : int ) -> Union[str, Any]:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : int , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Union[str, Any]:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class SCREAMING_SNAKE_CASE_ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
lowercase : Tuple = ["torch", "transformers", "onnx"]
def __init__( self : Dict , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Tuple:
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[str] , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : str ) -> List[Any]:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : List[str] ) -> Dict:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class SCREAMING_SNAKE_CASE_ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
lowercase : List[Any] = ["torch", "transformers", "onnx"]
def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : List[str] ) -> Any:
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[str] , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : Dict ) -> Dict:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Tuple , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> List[str]:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class SCREAMING_SNAKE_CASE_ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
lowercase : List[str] = ["torch", "transformers", "onnx"]
def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Any:
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[int] , *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : Any ) -> Dict:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : Tuple ) -> Optional[int]:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class SCREAMING_SNAKE_CASE_ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
lowercase : Union[str, Any] = ["torch", "transformers", "onnx"]
def __init__( self : Any , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> Union[str, Any]:
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Tuple , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[Any]:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Dict , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : List[str] ) -> Dict:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
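# Illustrative sketch, not part of the original file: these placeholders exist
# so that instantiating the class without the optional backends fails loudly.
#
#     obj = SomeOnnxPipeline()   # any of the dummy classes above (name assumed)
#     # -> ImportError from requires_backends, naming "torch", "transformers"
#     #    and "onnx" as the missing requirements.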
| 709 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
_lowercase : List[str] ='''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine'''
def A__ ( ) -> List[Any]:
A : Any =_ask_options(
'In which compute environment are you running?', ['This machine', 'AWS (Amazon SageMaker)'], _convert_compute_environment, )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
A : Tuple =get_sagemaker_input()
else:
A : str =get_cluster_input()
return config
def A__ ( lowercase: int=None ) -> str:
if subparsers is not None:
A : List[str] =subparsers.add_parser('config', description=lowercase )
else:
A : Union[str, Any] =argparse.ArgumentParser('Accelerate config command', description=lowercase )
parser.add_argument(
'--config_file', default=lowercase, help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
), )
if subparsers is not None:
parser.set_defaults(func=lowercase )
return parser
def A__ ( lowercase: Tuple ) -> List[Any]:
A : Union[str, Any] =get_user_input()
if args.config_file is not None:
A : Optional[Any] =args.config_file
else:
if not os.path.isdir(lowercase ):
os.makedirs(lowercase )
A : Union[str, Any] =default_yaml_config_file
if config_file.endswith('.json' ):
config.to_json_file(lowercase )
else:
config.to_yaml_file(lowercase )
print(F'accelerate configuration saved at {config_file}' )
def A__ ( ) -> Optional[int]:
A : Any =config_command_parser()
A : int =parser.parse_args()
config_command(lowercase )
if __name__ == "__main__":
main()
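# Illustrative usage, not part of the original file (flags as defined above):
#
#     $ accelerate config --config_file ./default_config.yaml
#     # prompts for the compute environment (this machine vs. SageMaker), then
#     # writes YAML, or JSON when the target path ends in ".json".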
| 661 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase : Any =logging.get_logger(__name__)
_lowercase : Dict ={
'''facebook/deit-base-distilled-patch16-224''': (
'''https://huggingface.co/facebook/deit-base-distilled-patch16-224/resolve/main/config.json'''
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
lowercase : List[Any] = "deit"
def __init__( self : Any , SCREAMING_SNAKE_CASE__ : Optional[Any]=7_68 , SCREAMING_SNAKE_CASE__ : str=12 , SCREAMING_SNAKE_CASE__ : Any=12 , SCREAMING_SNAKE_CASE__ : Optional[int]=30_72 , SCREAMING_SNAKE_CASE__ : Optional[int]="gelu" , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.0 , SCREAMING_SNAKE_CASE__ : List[Any]=0.0 , SCREAMING_SNAKE_CASE__ : Any=0.0_2 , SCREAMING_SNAKE_CASE__ : Any=1e-12 , SCREAMING_SNAKE_CASE__ : str=2_24 , SCREAMING_SNAKE_CASE__ : Any=16 , SCREAMING_SNAKE_CASE__ : int=3 , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : List[Any]=16 , **SCREAMING_SNAKE_CASE__ : Union[str, Any] , ) -> Union[str, Any]:
super().__init__(**SCREAMING_SNAKE_CASE__ )
A : int =hidden_size
A : Tuple =num_hidden_layers
A : Dict =num_attention_heads
A : Dict =intermediate_size
A : Any =hidden_act
A : Tuple =hidden_dropout_prob
A : int =attention_probs_dropout_prob
A : Dict =initializer_range
A : int =layer_norm_eps
A : int =image_size
A : Tuple =patch_size
A : str =num_channels
A : str =qkv_bias
A : Optional[Any] =encoder_stride
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
lowercase : Dict = version.parse("1.11" )
@property
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> float:
return 1e-4
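# Illustrative sketch, not part of the original file: pairing the model config
# with the ONNX config above (the ONNX class name, assumed here to be
# ``DeiTOnnxConfig``, is obscured in this excerpt).
#
#     onnx_config = DeiTOnnxConfig(DeiTConfig())
#     list(onnx_config.inputs)   # -> ['pixel_values'] with batch/channel axes
#     # validation tolerance is the 1e-4 property defined above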
| 710 |
import collections
import importlib.util
import os
import re
from pathlib import Path
_lowercase : List[str] ='''src/transformers'''
# Matches is_xxx_available()
_lowercase : Dict =re.compile(R'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
_lowercase : List[Any] =re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_lowercase : Tuple =re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
_lowercase : Dict =re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
_lowercase : List[Any] =re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_lowercase : str =re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
_lowercase : Optional[int] =re.compile(R'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
_lowercase : Any =re.compile(R'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
_lowercase : List[Any] =re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
_lowercase : Optional[Any] =re.compile(R'''^\s*try:''')
# Catches a line with else:
_lowercase : List[Any] =re.compile(R'''^\s*else:''')
def A__ ( lowercase: Dict ) -> int:
if _re_test_backend.search(lowercase ) is None:
return None
A : Any =[b[0] for b in _re_backend.findall(lowercase )]
backends.sort()
return "_and_".join(lowercase )
def A__ ( lowercase: Any ) -> List[Any]:
with open(lowercase, 'r', encoding='utf-8', newline='\n' ) as f:
A : Optional[Any] =f.readlines()
A : Dict =0
while line_index < len(lowercase ) and not lines[line_index].startswith('_import_structure = {' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(lowercase ):
return None
# First grab the objects without a specific backend in _import_structure
A : Optional[int] =[]
while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None:
A : int =lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(lowercase ):
A : int =_re_one_line_import_struct.search(lowercase ).groups()[0]
A : int =re.findall(R'\[([^\]]+)\]', lowercase )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(', ' )] )
line_index += 1
continue
A : Optional[int] =_re_import_struct_key_value.search(lowercase )
if single_line_import_search is not None:
A : Dict =[obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(lowercase ) > 0]
objects.extend(lowercase )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
line_index += 1
A : str ={'none': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('if TYPE_CHECKING' ):
# If the line is an if not is_backend_available, we grab all objects associated.
A : Optional[int] =find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
A : str =None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
A : List[str] =[]
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ):
A : Optional[Any] =lines[line_index]
if _re_import_struct_add_one.search(lowercase ) is not None:
objects.append(_re_import_struct_add_one.search(lowercase ).groups()[0] )
elif _re_import_struct_add_many.search(lowercase ) is not None:
A : Optional[Any] =_re_import_struct_add_many.search(lowercase ).groups()[0].split(', ' )
A : int =[obj[1:-1] for obj in imports if len(lowercase ) > 0]
objects.extend(lowercase )
elif _re_between_brackets.search(lowercase ) is not None:
A : Optional[int] =_re_between_brackets.search(lowercase ).groups()[0].split(', ' )
A : Optional[int] =[obj[1:-1] for obj in imports if len(lowercase ) > 0]
objects.extend(lowercase )
elif _re_quote_object.search(lowercase ) is not None:
objects.append(_re_quote_object.search(lowercase ).groups()[0] )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
elif line.startswith(' ' * 12 + '"' ):
objects.append(line[13:-3] )
line_index += 1
A : Optional[Any] =objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
A : Optional[Any] =[]
while (
line_index < len(lowercase )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('else' )
):
A : Any =lines[line_index]
A : Optional[int] =_re_import.search(lowercase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
A : Optional[Any] ={'none': objects}
# Let's continue with backend-specific objects
while line_index < len(lowercase ):
# If the line is an if is_backend_available, we grab all objects associated.
A : str =find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
A : Optional[Any] =None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
A : List[str] =[]
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ):
A : Any =lines[line_index]
A : Any =_re_import.search(lowercase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 12 ):
objects.append(line[12:-2] )
line_index += 1
A : Dict =objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def A__ ( lowercase: Any, lowercase: int ) -> Dict:
def find_duplicates(lowercase: List[str] ):
return [k for k, v in collections.Counter(lowercase ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
A : List[Any] =[]
for key in import_dict_objects.keys():
A : List[Any] =find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F'Duplicate _import_structure definitions for: {duplicate_imports}' )
A : Tuple =find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
A : Tuple ='base imports' if key == 'none' else F'{key} backend'
errors.append(F'Differences for {name}:' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F' {a} in TYPE_HINT but not in _import_structure.' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F' {a} in _import_structure but not in TYPE_HINT.' )
return errors
def A__ ( ) -> List[str]:
A : Dict =[]
for root, _, files in os.walk(lowercase ):
if "__init__.py" in files:
A : Any =os.path.join(lowercase, '__init__.py' )
A : Union[str, Any] =parse_init(lowercase )
if objects is not None:
A : str =analyze_results(*lowercase )
if len(lowercase ) > 0:
A : Any =F'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'
failures.append('\n'.join(lowercase ) )
if len(lowercase ) > 0:
raise ValueError('\n\n'.join(lowercase ) )
def A__ ( ) -> int:
A : List[str] =[]
for path, directories, files in os.walk(lowercase ):
for folder in directories:
# Ignore private modules
if folder.startswith('_' ):
directories.remove(lowercase )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(lowercase ) / folder).glob('*.py' ) ) ) == 0:
continue
A : Any =str((Path(lowercase ) / folder).relative_to(lowercase ) )
A : List[str] =short_path.replace(os.path.sep, '.' )
submodules.append(lowercase )
for fname in files:
if fname == "__init__.py":
continue
A : Optional[Any] =str((Path(lowercase ) / fname).relative_to(lowercase ) )
A : Dict =short_path.replace('.py', '' ).replace(os.path.sep, '.' )
if len(submodule.split('.' ) ) == 1:
submodules.append(lowercase )
return submodules
_lowercase : Tuple =[
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
]
def A__ ( ) -> Tuple:
# This is to make sure the transformers module imported is the one in the repo.
A : str =importlib.util.spec_from_file_location(
'transformers', os.path.join(lowercase, '__init__.py' ), submodule_search_locations=[PATH_TO_TRANSFORMERS], )
A : Any =spec.loader.load_module()
A : Any =[
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(lowercase ) > 0:
A : Dict ='\n'.join(F'- {module}' for module in module_not_registered )
raise ValueError(
'The following submodules are not properly registered in the main init of Transformers:\n'
F'{list_of_modules}\n'
'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
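# Illustrative usage, not part of the original file: this consistency check is
# normally run from the repository root (exact invocation assumed):
#
#     $ python utils/check_inits.py
#     # raises ValueError when _import_structure and TYPE_CHECKING disagree,
#     # or when a submodule is missing from the main init.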
| 661 | 0 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def A__ ( ) -> tuple[list[int], int]:
A : Optional[int] =[randint(-1_000, 1_000 ) for i in range(10 )]
A : List[Any] =randint(-5_000, 5_000 )
return (arr, r)
_lowercase : int =make_dataset()
def A__ ( lowercase: list[int], lowercase: int ) -> tuple[int, ...]:
for triplet in permutations(lowercase, 3 ):
if sum(lowercase ) == target:
return tuple(sorted(lowercase ) )
return (0, 0, 0)
def A__ ( lowercase: list[int], lowercase: int ) -> tuple[int, int, int]:
arr.sort()
A : Optional[int] =len(lowercase )
for i in range(n - 1 ):
A , A : Union[str, Any] =i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def A__ ( ) -> tuple[float, float]:
A : Optional[Any] ='\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n'
A : Tuple ='\ntriplet_sum1(*dataset)\n'
A : Optional[int] ='\ntriplet_sum2(*dataset)\n'
A : Dict =repeat(setup=lowercase, stmt=lowercase, repeat=5, number=10_000 )
A : Optional[int] =repeat(setup=lowercase, stmt=lowercase, repeat=5, number=10_000 )
return (min(lowercase ), min(lowercase ))
if __name__ == "__main__":
from doctest import testmod
testmod()
_lowercase : List[Any] =solution_times()
print(f'''The time for naive implementation is {times[0]}.''')
print(f'''The time for optimized implementation is {times[1]}.''')
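# Illustrative check, not part of the original file. Upstream the two
# strategies are named triplet_sum1/triplet_sum2 (names assumed; both appear
# above as A__):
#
#     triplet_sum2([13, 29, 7, 23, 5], 35)   # -> (5, 7, 23): sort, then for
#     # each i move a left/right pointer pair inward until the sum matches.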
| 711 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
_lowercase : Any =logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
lowercase : Optional[float] = field(
default=0.0 , metadata={"help": "The label smoothing epsilon to apply (if not zero)."} )
lowercase : bool = field(default=lowerCAmelCase_ , metadata={"help": "Whether to SortishSamler or not."} )
lowercase : bool = field(
default=lowerCAmelCase_ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
lowercase : bool = field(default=lowerCAmelCase_ , metadata={"help": "whether to use adafactor"} )
lowercase : Optional[float] = field(
default=lowerCAmelCase_ , metadata={"help": "Encoder layer dropout probability. Goes into model.config."} )
lowercase : Optional[float] = field(
default=lowerCAmelCase_ , metadata={"help": "Decoder layer dropout probability. Goes into model.config."} )
lowercase : Optional[float] = field(default=lowerCAmelCase_ , metadata={"help": "Dropout probability. Goes into model.config."} )
lowercase : Optional[float] = field(
default=lowerCAmelCase_ , metadata={"help": "Attention dropout probability. Goes into model.config."} )
lowercase : Optional[str] = field(
default="linear" , metadata={"help": f'Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'} , )
| 661 | 0 |
from __future__ import annotations
from collections.abc import Callable
_lowercase : List[Any] =list[list[float | int]]
def A__ ( lowercase: Matrix, lowercase: Matrix ) -> Matrix:
A : int =len(lowercase )
A : Matrix =[[0 for _ in range(size + 1 )] for _ in range(lowercase )]
A : int
A : int
A : int
A : int
A : int
A : float
for row in range(lowercase ):
for col in range(lowercase ):
A : Union[str, Any] =matrix[row][col]
A : Dict =vector[row][0]
A : Tuple =0
A : Any =0
while row < size and col < size:
# pivoting
A : str =max((abs(augmented[rowa][col] ), rowa) for rowa in range(lowercase, lowercase ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
A , A : Union[str, Any] =augmented[pivot_row], augmented[row]
for rowa in range(row + 1, lowercase ):
A : Union[str, Any] =augmented[rowa][col] / augmented[row][col]
A : List[str] =0
for cola in range(col + 1, size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1, lowercase ):
for row in range(lowercase ):
A : List[Any] =augmented[row][col] / augmented[col][col]
for cola in range(lowercase, size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row], 10 )] for row in range(lowercase )
]
def A__ ( lowercase: list[int] ) -> Callable[[int], int]:
A : int =len(lowercase )
A : Matrix =[[0 for _ in range(lowercase )] for _ in range(lowercase )]
A : Matrix =[[0] for _ in range(lowercase )]
A : Matrix
A : int
A : int
A : int
for x_val, y_val in enumerate(lowercase ):
for col in range(lowercase ):
A : Any =(x_val + 1) ** (size - col - 1)
A : List[str] =y_val
A : Tuple =solve(lowercase, lowercase )
def interpolated_func(lowercase: int ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(lowercase ) )
return interpolated_func
def A__ ( lowercase: int ) -> int:
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def A__ ( lowercase: Callable[[int], int] = question_function, lowercase: int = 10 ) -> int:
A : list[int] =[func(lowercase ) for x_val in range(1, order + 1 )]
A : list[Callable[[int], int]] =[
interpolate(data_points[:max_coeff] ) for max_coeff in range(1, order + 1 )
]
A : int =0
A : Callable[[int], int]
A : int
for poly in polynomials:
A : Optional[Any] =1
while func(lowercase ) == poly(lowercase ):
x_val += 1
ret += poly(lowercase )
return ret
if __name__ == "__main__":
print(f'''{solution() = }''')
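# Illustrative check, not part of the original file. With u(n) = n**3, the
# quadratic fit through the first three points is 6*n**2 - 11*n + 6, so its
# first incorrect term is OP(3, 4) = 58, the worked example from Project
# Euler 101 (the interpolation helper is the second A__ above; upstream name
# ``interpolate`` assumed):
#
#     cubic_fit = interpolate([n**3 for n in range(1, 4)])
#     cubic_fit(4)   # -> 58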
| 712 |
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
_lowercase : int =2
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
def __init__( self : List[Any] , *, # begin keyword-only arguments
SCREAMING_SNAKE_CASE__ : List[Any]="<s>" , SCREAMING_SNAKE_CASE__ : Optional[int]="<pad>" , SCREAMING_SNAKE_CASE__ : List[str]="</s>" , SCREAMING_SNAKE_CASE__ : Optional[Any]="<unk>" , SCREAMING_SNAKE_CASE__ : int=None , ) -> List[Any]:
A , A , A , A : Optional[Any] =bos, unk, pad, eos
A : Dict =[]
A : Union[str, Any] =[]
A : Any ={}
A : int =self.add_symbol(SCREAMING_SNAKE_CASE__ )
A : Any =self.add_symbol(SCREAMING_SNAKE_CASE__ )
A : List[Any] =self.add_symbol(SCREAMING_SNAKE_CASE__ )
A : List[str] =self.add_symbol(SCREAMING_SNAKE_CASE__ )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(SCREAMING_SNAKE_CASE__ )
A : List[str] =len(self.symbols )
def __eq__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> str:
return self.indices == other.indices
def __getitem__( self : int , SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[Any]:
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self : List[Any] ) -> Union[str, Any]:
return len(self.symbols )
def __contains__( self : Dict , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Tuple:
return sym in self.indices
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , SCREAMING_SNAKE_CASE__ : int ) -> Any:
A : Union[str, Any] =cls()
d.add_from_file(SCREAMING_SNAKE_CASE__ )
return d
def SCREAMING_SNAKE_CASE_ ( self : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any=1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=False ) -> Any:
if word in self.indices and not overwrite:
A : int =self.indices[word]
A : Union[str, Any] =self.count[idx] + n
return idx
else:
A : Tuple =len(self.symbols )
A : str =idx
self.symbols.append(SCREAMING_SNAKE_CASE__ )
self.count.append(SCREAMING_SNAKE_CASE__ )
return idx
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[Any]:
return 0
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : List[str] ) -> Optional[Any]:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
try:
with open(SCREAMING_SNAKE_CASE__ , 'r' , encoding='utf-8' ) as fd:
self.add_from_file(SCREAMING_SNAKE_CASE__ )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(SCREAMING_SNAKE_CASE__ ) )
return
A : str =f.readlines()
A : int =self._load_meta(SCREAMING_SNAKE_CASE__ )
for line in lines[indices_start_line:]:
try:
A , A : Optional[int] =line.rstrip().rsplit(' ' , 1 )
if field == "#fairseq:overwrite":
A : int =True
A , A : Optional[Any] =line.rsplit(' ' , 1 )
else:
A : Any =False
A : Tuple =int(SCREAMING_SNAKE_CASE__ )
A : Optional[int] =line
if word in self and not overwrite:
raise RuntimeError(
'Duplicate word found when loading Dictionary: \'{}\'. '
'Duplicate words can overwrite earlier ones by adding the '
'#fairseq:overwrite flag at the end of the corresponding row '
'in the dictionary file. If using the Camembert model, please '
'download an updated copy of the model file.'.format(SCREAMING_SNAKE_CASE__ ) )
self.add_symbol(SCREAMING_SNAKE_CASE__ , n=SCREAMING_SNAKE_CASE__ , overwrite=SCREAMING_SNAKE_CASE__ )
except ValueError:
raise ValueError('Incorrect dictionary format, expected \'<token> <cnt> [flags]\'' )
def A__ ( lowercase: Union[str, Any] ) -> str:
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
A : int =dict((re.sub(r'@@$', '', lowercase ), v) if k.endswith('@@' ) else (re.sub(r'$', '</w>', lowercase ), v) for k, v in d.items() )
A : int ='<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[F'{k}</w>']
A : List[Any] =d[k] # restore
return da
def A__ ( lowercase: Optional[int], lowercase: Optional[Any] ) -> str:
# prep
if not os.path.exists(lowercase ):
raise ValueError(F'path {biogpt_checkpoint_path} does not exist!' )
os.makedirs(lowercase, exist_ok=lowercase )
print(F'Writing results to {pytorch_dump_folder_path}' )
# handle various types of models
A : List[str] =os.path.join(lowercase, 'checkpoint.pt' )
if not os.path.isfile(lowercase ):
raise ValueError(F'path to the file {checkpoint_file} does not exist!' )
A : Optional[Any] =torch.load(lowercase, map_location='cpu' )
A : Any =chkpt['cfg']['model']
# dicts
A : Any =os.path.join(lowercase, 'dict.txt' )
if not os.path.isfile(lowercase ):
raise ValueError(F'path to the file {dict_file} does not exist!' )
A : Dict =Dictionary.load(lowercase )
A : Optional[Any] =rewrite_dict_keys(src_dict.indices )
A : Tuple =len(lowercase )
A : Any =os.path.join(lowercase, VOCAB_FILES_NAMES['vocab_file'] )
print(F'Generating {src_vocab_file} of {src_vocab_size} records' )
with open(lowercase, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(lowercase, ensure_ascii=lowercase, indent=lowercase ) )
# merges_file (bpecodes)
A : List[str] =os.path.join(lowercase, 'bpecodes' )
if not os.path.isfile(lowercase ):
raise ValueError(F'path to the file {bpecodes_file} does not exist!' )
A : List[str] =os.path.join(lowercase, VOCAB_FILES_NAMES['merges_file'] )
shutil.copyfile(lowercase, lowercase )
# model config
A : Tuple =os.path.join(lowercase, 'config.json' )
A : Tuple ={
'activation_dropout': args['activation_dropout'],
'architectures': ['BioGptForCausalLM'],
'attention_probs_dropout_prob': args['attention_dropout'],
'bos_token_id': 0,
'eos_token_id': 2,
'hidden_act': args['activation_fn'],
'hidden_dropout_prob': args['dropout'],
'hidden_size': args['decoder_embed_dim'],
'initializer_range': 0.02,
'intermediate_size': args['decoder_ffn_embed_dim'],
'layer_norm_eps': 1e-1_2,
'layerdrop': args['decoder_layerdrop'],
'max_position_embeddings': args['max_target_positions'],
'model_type': 'biogpt',
'num_attention_heads': args['decoder_attention_heads'],
'num_hidden_layers': args['decoder_layers'],
'pad_token_id': 1,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_decoder_input_output_embed'],
'vocab_size': src_vocab_size,
}
# good hparam defaults to start with
print(F'Generating {biogpt_model_config_file}' )
with open(lowercase, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(lowercase, ensure_ascii=lowercase, indent=lowercase ) )
# tokenizer config
A : int =os.path.join(lowercase, lowercase )
A : List[str] ={
'bos_token': '<s>',
'eos_token': '</s>',
'model_max_length': 1_024,
'pad_token': '<pad>',
'special_tokens_map_file': None,
'tokenizer_class': 'BioGptTokenizer',
'unk_token': '<unk>',
}
print(F'Generating {biogpt_tokenizer_config_file}' )
with open(lowercase, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(lowercase, ensure_ascii=lowercase, indent=lowercase ) )
# model
A : List[Any] =chkpt['model']
# remove unneeded keys
A : List[Any] =[
'decoder.version',
]
for k in ignore_keys:
model_state_dict.pop(lowercase, lowercase )
A : str =list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith('output_projection.weight' ):
A : Union[str, Any] =model_state_dict.pop(lowercase )
else:
A : List[str] =model_state_dict.pop(lowercase )
A : Any =BioGptConfig.from_pretrained(lowercase )
A : str =BioGptForCausalLM(lowercase )
# check that it loads ok
model_new.load_state_dict(lowercase )
# save
A : Tuple =os.path.join(lowercase, lowercase )
print(F'Generating {pytorch_weights_dump_path}' )
torch.save(lowercase, lowercase )
print('Conversion is done!' )
if __name__ == "__main__":
_lowercase : Union[str, Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--biogpt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_lowercase : List[Any] =parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
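# Illustrative invocation, not part of the original file (expects
# checkpoint.pt, dict.txt and bpecodes inside the dump directory, as checked
# above):
#
#     $ python convert_biogpt_original_pytorch_checkpoint_to_pytorch.py \
#         --biogpt_checkpoint_path /path/to/fairseq_dump \
#         --pytorch_dump_folder_path ./biogpt-hf
#     # script file name assumed from the upstream repository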
| 661 | 0 |
def A__ ( lowercase: float, lowercase: float ) -> float:
if mass < 0:
raise ValueError('The mass of a body cannot be negative' )
return 0.5 * mass * abs(lowercase ) * abs(lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
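# Illustrative check, not part of the original file: KE = m * |v|**2 / 2, and
# the sign of the velocity is discarded via abs(), so
#
#     kinetic_energy(10, 10)    # -> 500.0   (upstream name assumed)
#     kinetic_energy(10, -10)   # -> 500.0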
| 713 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
_lowercase : str =False
_lowercase : Optional[Any] =False
def A__ ( lowercase: Namespace ) -> Optional[int]:
return TrainCommand(lowercase )
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
@staticmethod
def SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ : ArgumentParser ) -> Dict:
A : Optional[Any] =parser.add_parser('train' , help='CLI tool to train a model on a task.' )
train_parser.add_argument(
'--train_data' , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' , )
train_parser.add_argument(
'--column_label' , type=SCREAMING_SNAKE_CASE__ , default=0 , help='Column of the dataset csv file with example labels.' )
train_parser.add_argument(
'--column_text' , type=SCREAMING_SNAKE_CASE__ , default=1 , help='Column of the dataset csv file with example texts.' )
train_parser.add_argument(
'--column_id' , type=SCREAMING_SNAKE_CASE__ , default=2 , help='Column of the dataset csv file with example ids.' )
train_parser.add_argument(
'--skip_first_row' , action='store_true' , help='Skip the first row of the csv file (headers).' )
train_parser.add_argument('--validation_data' , type=SCREAMING_SNAKE_CASE__ , default='' , help='path to validation dataset.' )
train_parser.add_argument(
'--validation_split' , type=SCREAMING_SNAKE_CASE__ , default=0.1 , help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' , )
train_parser.add_argument('--output' , type=SCREAMING_SNAKE_CASE__ , default='./' , help='path to save the trained model.' )
train_parser.add_argument(
'--task' , type=SCREAMING_SNAKE_CASE__ , default='text_classification' , help='Task to train the model on.' )
train_parser.add_argument(
'--model' , type=SCREAMING_SNAKE_CASE__ , default='bert-base-uncased' , help='Model\'s name or path to stored model.' )
train_parser.add_argument('--train_batch_size' , type=SCREAMING_SNAKE_CASE__ , default=32 , help='Batch size for training.' )
train_parser.add_argument('--valid_batch_size' , type=SCREAMING_SNAKE_CASE__ , default=64 , help='Batch size for validation.' )
train_parser.add_argument('--learning_rate' , type=SCREAMING_SNAKE_CASE__ , default=3e-5 , help='Learning rate.' )
train_parser.add_argument('--adam_epsilon' , type=SCREAMING_SNAKE_CASE__ , default=1e-08 , help='Epsilon for Adam optimizer.' )
train_parser.set_defaults(func=SCREAMING_SNAKE_CASE__ )
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Namespace ) -> List[Any]:
A : Optional[int] =logging.get_logger('transformers-cli/training' )
A : Dict ='tf' if is_tf_available() else 'torch'
os.makedirs(args.output , exist_ok=SCREAMING_SNAKE_CASE__ )
A : Optional[Any] =args.output
A : List[str] =args.column_label
A : int =args.column_text
A : Union[str, Any] =args.column_id
self.logger.info(f'Loading {args.task} pipeline for {args.model}' )
if args.task == "text_classification":
A : Optional[Any] =TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f'Loading dataset from {args.train_data}' )
A : Tuple =Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
A : Dict =None
if args.validation_data:
self.logger.info(f'Loading validation dataset from {args.validation_data}' )
A : List[Any] =Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
A : Optional[Any] =args.validation_split
A : str =args.train_batch_size
A : Any =args.valid_batch_size
A : Dict =args.learning_rate
A : List[str] =args.adam_epsilon
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Union[str, Any]:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def SCREAMING_SNAKE_CASE_ ( self : int ) -> List[str]:
raise NotImplementedError
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> str:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
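# Illustrative invocation, not part of the original file (flags as registered
# above; upstream, only the TensorFlow path implements fit, while the other
# run method raises NotImplementedError, as shown):
#
#     $ transformers-cli train --train_data data/train.csv \
#         --column_label 0 --column_text 1 --output ./model \
#         --task text_classification --model bert-base-uncased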
| 661 | 0 |
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
_lowercase : List[str] =logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self : Tuple , *SCREAMING_SNAKE_CASE__ : Optional[int] , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> None:
warnings.warn(
'The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use PerceiverImageProcessor instead.' , SCREAMING_SNAKE_CASE__ , )
super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
| 714 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple=7 , SCREAMING_SNAKE_CASE__ : Dict=3 , SCREAMING_SNAKE_CASE__ : Tuple=30 , SCREAMING_SNAKE_CASE__ : int=4_00 , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : List[Any]=True , SCREAMING_SNAKE_CASE__ : Dict=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE__ : str=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Any=1 / 2_55 , SCREAMING_SNAKE_CASE__ : int=True , ) -> Optional[int]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
A : Optional[Any] =size if size is not None else {'shortest_edge': 18, 'longest_edge': 13_33}
A : Union[str, Any] =parent
A : Union[str, Any] =batch_size
A : Union[str, Any] =num_channels
A : int =min_resolution
A : List[Any] =max_resolution
A : Dict =do_resize
A : Tuple =size
A : List[str] =do_normalize
A : List[Any] =image_mean
A : Dict =image_std
A : Any =do_rescale
A : List[str] =rescale_factor
A : Optional[Any] =do_pad
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> List[Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict=False ) -> Dict:
if not batched:
A : Any =image_inputs[0]
if isinstance(SCREAMING_SNAKE_CASE__ , Image.Image ):
A , A : Union[str, Any] =image.size
else:
A , A : Tuple =image.shape[1], image.shape[2]
if w < h:
A : Any =int(self.size['shortest_edge'] * h / w )
A : Any =self.size['shortest_edge']
elif w > h:
A : Dict =self.size['shortest_edge']
A : Dict =int(self.size['shortest_edge'] * w / h )
else:
A : List[str] =self.size['shortest_edge']
A : Dict =self.size['shortest_edge']
else:
A : List[Any] =[]
for image in image_inputs:
A , A : int =self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
A : str =max(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : item[0] )[0]
A : Tuple =max(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
lowercase : List[Any] = ConditionalDetrImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Tuple:
A : str =ConditionalDetrImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> int:
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Tuple:
A : Tuple =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'image_mean' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'image_std' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'do_normalize' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'do_resize' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , 'size' ) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> int:
A : int =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 13_33} )
self.assertEqual(image_processor.do_pad , SCREAMING_SNAKE_CASE__ )
A : str =self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=SCREAMING_SNAKE_CASE__ )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Optional[int]:
pass
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Union[str, Any]:
# Initialize image_processing
A : Union[str, Any] =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A : Tuple =prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
# Test not batched input
A : List[Any] =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
A , A : List[str] =self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A , A : Union[str, Any] =self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ )
A : Union[str, Any] =image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> int:
# Initialize image_processing
A : Optional[Any] =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A : str =prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , numpify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , np.ndarray )
# Test not batched input
A : Tuple =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
A , A : Any =self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A : Tuple =image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values
A , A : Optional[int] =self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> List[str]:
# Initialize image_processing
A : Optional[Any] =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A : Any =prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , torchify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , torch.Tensor )
# Test not batched input
A : Optional[int] =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
A , A : Tuple =self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A : Tuple =image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values
A , A : int =self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Union[str, Any]:
# prepare image and target
A : Union[str, Any] =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
A : List[Any] =json.loads(f.read() )
A : Any ={'image_id': 3_97_69, 'annotations': target}
# encode them
A : str =ConditionalDetrImageProcessor.from_pretrained('microsoft/conditional-detr-resnet-50' )
A : Any =image_processing(images=SCREAMING_SNAKE_CASE__ , annotations=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
# verify pixel values
A : Optional[Any] =torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , SCREAMING_SNAKE_CASE__ )
A : List[str] =torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
# verify area
A : Dict =torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , SCREAMING_SNAKE_CASE__ ) )
# verify boxes
A : str =torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , SCREAMING_SNAKE_CASE__ )
A : Union[str, Any] =torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , SCREAMING_SNAKE_CASE__ , atol=1e-3 ) )
# verify image_id
A : Dict =torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , SCREAMING_SNAKE_CASE__ ) )
# verify is_crowd
A : List[str] =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , SCREAMING_SNAKE_CASE__ ) )
# verify class_labels
A : Union[str, Any] =torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , SCREAMING_SNAKE_CASE__ ) )
# verify orig_size
A : List[Any] =torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , SCREAMING_SNAKE_CASE__ ) )
# verify size
A : int =torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , SCREAMING_SNAKE_CASE__ ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Tuple:
# prepare image, target and masks_path
A : List[str] =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
A : Optional[int] =json.loads(f.read() )
A : int ={'file_name': '000000039769.png', 'image_id': 3_97_69, 'segments_info': target}
A : Optional[Any] =pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
A : List[Any] =ConditionalDetrImageProcessor(format='coco_panoptic' )
A : Union[str, Any] =image_processing(images=SCREAMING_SNAKE_CASE__ , annotations=SCREAMING_SNAKE_CASE__ , masks_path=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
# verify pixel values
A : Dict =torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , SCREAMING_SNAKE_CASE__ )
A : Dict =torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
# verify area
A : Optional[int] =torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , SCREAMING_SNAKE_CASE__ ) )
# verify boxes
A : List[Any] =torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , SCREAMING_SNAKE_CASE__ )
A : Any =torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , SCREAMING_SNAKE_CASE__ , atol=1e-3 ) )
# verify image_id
A : List[Any] =torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , SCREAMING_SNAKE_CASE__ ) )
# verify is_crowd
A : Any =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , SCREAMING_SNAKE_CASE__ ) )
# verify class_labels
A : str =torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , SCREAMING_SNAKE_CASE__ ) )
# verify masks
A : int =82_28_73
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , SCREAMING_SNAKE_CASE__ )
# verify orig_size
A : Any =torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , SCREAMING_SNAKE_CASE__ ) )
# verify size
A : str =torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , SCREAMING_SNAKE_CASE__ ) )
| 661 | 0 |
def A__ ( lowercase: int ) -> int:
if not isinstance(lowercase, lowercase ) or number < 0:
raise ValueError('Input must be a non-negative integer' )
A : Any =0
while number:
# This way we arrive at next set bit (next 1) instead of looping
# through each bit and checking for 1s hence the
# loop won't run 32 times it will only run the number of `1` times
number &= number - 1
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
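# Worked trace of the `number &= number - 1` trick above (standalone sketch,
# independent of this row's transformed names): each `n &= n - 1` clears the
# lowest set bit, so 13 = 0b1101 -> 0b1100 -> 0b1000 -> 0b0000.
n, count = 13, 0
while n:
    n &= n - 1  # drop the lowest set bit
    count += 1
assert count == 3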
| 715 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_lowercase : List[Any] =1_6
_lowercase : Union[str, Any] =3_2
def A__ ( lowercase: Accelerator, lowercase: int = 16, lowercase: str = "bert-base-cased" ) -> Optional[int]:
A : List[Any] =AutoTokenizer.from_pretrained(lowercase )
A : Any =load_dataset('glue', 'mrpc' )
def tokenize_function(lowercase: Any ):
# max_length=None => use the model max length (it's actually the default)
A : List[str] =tokenizer(examples['sentence1'], examples['sentence2'], truncation=lowercase, max_length=lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
A : Any =datasets.map(
lowercase, batched=lowercase, remove_columns=['idx', 'sentence1', 'sentence2'], load_from_cache_file=lowercase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A : Dict =tokenized_datasets.rename_column('label', 'labels' )
def collate_fn(lowercase: Optional[int] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowercase, padding='max_length', max_length=128, return_tensors='pt' )
return tokenizer.pad(lowercase, padding='longest', return_tensors='pt' )
# Instantiate dataloaders.
A : Union[str, Any] =DataLoader(
tokenized_datasets['train'], shuffle=lowercase, collate_fn=lowercase, batch_size=lowercase )
A : str =DataLoader(
tokenized_datasets['validation'], shuffle=lowercase, collate_fn=lowercase, batch_size=lowercase )
return train_dataloader, eval_dataloader
def A__ ( lowercase: Dict, lowercase: Optional[int], lowercase: Any, lowercase: str ) -> Tuple:
model.eval()
A : Tuple =0
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
A : Tuple =model(**lowercase )
A : Tuple =outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
A , A : Union[str, Any] =accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(lowercase ) - 1:
A : List[Any] =predictions[: len(eval_dataloader.dataset ) - samples_seen]
A : Optional[int] =references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=lowercase, references=lowercase, )
A : Union[str, Any] =metric.compute()
return eval_metric["accuracy"]
def A__ ( lowercase: Union[str, Any], lowercase: Dict ) -> List[str]:
# Initialize accelerator
A : Optional[int] =Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A : int =config['lr']
A : Optional[Any] =int(config['num_epochs'] )
A : Union[str, Any] =int(config['seed'] )
A : List[str] =int(config['batch_size'] )
A : Optional[Any] =args.model_name_or_path
set_seed(lowercase )
A , A : str =get_dataloaders(lowercase, lowercase, lowercase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A : List[str] =AutoModelForSequenceClassification.from_pretrained(lowercase, return_dict=lowercase )
# Instantiate optimizer
A : Any =(
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
A : List[str] =optimizer_cls(params=model.parameters(), lr=lowercase )
if accelerator.state.deepspeed_plugin is not None:
A : Optional[int] =accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
A : Dict =1
A : Union[str, Any] =(len(lowercase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
A : List[Any] =get_linear_schedule_with_warmup(
optimizer=lowercase, num_warmup_steps=0, num_training_steps=lowercase, )
else:
A : List[str] =DummyScheduler(lowercase, total_num_steps=lowercase, warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A , A , A , A , A : Optional[int] =accelerator.prepare(
lowercase, lowercase, lowercase, lowercase, lowercase )
# We need to keep track of how many total steps we have iterated over
A : Tuple =0
    # We also need to keep track of the starting epoch so files are named properly
A : List[str] =0
A : Tuple =evaluate.load('glue', 'mrpc' )
A : Optional[int] =num_epochs
if args.partial_train_epoch is not None:
A : Dict =args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
A : List[Any] =args.resume_from_checkpoint.split('epoch_' )[1]
A : List[Any] =''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
A : Union[str, Any] =int(lowercase ) + 1
A : List[str] =evaluation_loop(lowercase, lowercase, lowercase, lowercase )
accelerator.print('resumed checkpoint performance:', lowercase )
accelerator.print('resumed checkpoint\'s scheduler\'s lr:', lr_scheduler.get_lr()[0] )
        accelerator.print('resumed optimizer\'s lr:', optimizer.param_groups[0]['lr'] )
with open(os.path.join(args.output_dir, F'state_{starting_epoch-1}.json' ), 'r' ) as f:
A : Union[str, Any] =json.load(lowercase )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
A : str ={}
for epoch in range(lowercase, lowercase ):
model.train()
for step, batch in enumerate(lowercase ):
A : Tuple =model(**lowercase )
A : List[Any] =outputs.loss
A : Any =loss / gradient_accumulation_steps
accelerator.backward(lowercase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
A : Union[str, Any] =F'epoch_{epoch}'
A : Optional[Any] =os.path.join(args.output_dir, lowercase )
accelerator.save_state(lowercase )
A : Optional[Any] =evaluation_loop(lowercase, lowercase, lowercase, lowercase )
A : Dict =accuracy
A : Optional[Any] =lr_scheduler.get_lr()[0]
A : Any =optimizer.param_groups[0]['lr']
A : str =epoch
A : Dict =overall_step
accelerator.print(F'epoch {epoch}:', lowercase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir, F'state_{epoch}.json' ), 'w' ) as f:
json.dump(lowercase, lowercase )
def A__ ( ) -> Optional[int]:
A : Optional[int] =argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path', type=lowercase, default='bert-base-cased', help='Path to pretrained model or model identifier from huggingface.co/models.', required=lowercase, )
parser.add_argument(
'--output_dir', type=lowercase, default='.', help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.', )
parser.add_argument(
'--resume_from_checkpoint', type=lowercase, default=lowercase, help='If the training should continue from a checkpoint folder.', )
parser.add_argument(
'--partial_train_epoch', type=lowercase, default=lowercase, help='If passed, the training will stop after this number of epochs.', )
parser.add_argument(
'--num_epochs', type=lowercase, default=2, help='Number of train epochs.', )
A : str =parser.parse_args()
A : Optional[int] ={'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(lowercase, lowercase )
if __name__ == "__main__":
main()
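# Sketch of the resume bookkeeping above (folder name illustrative): the epoch
# index is scraped from the checkpoint directory name, and training restarts
# at the following epoch.
epoch_string = 'outputs/epoch_7'.split('epoch_')[1]  # -> '7'
digits = ''
for char in epoch_string:
    if char.isdigit():
        digits += char
    else:
        break
starting_epoch = int(digits) + 1
assert starting_epoch == 8  # epochs 0..7 are already done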
| 661 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_lowercase : List[str] ={
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict =[
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : int =[
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[str] =[
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
_lowercase : Dict =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
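# Illustrative consequence of the _LazyModule registration above: importing the
# package is cheap, and the torch/TF/Flax backends are pulled in only on first
# attribute access (a sketch, assuming the relevant backend is installed).
import transformers.models.wav2vec2 as wav2vec2  # no heavy imports yet

model_cls = wav2vec2.Wav2Vec2ForCTC  # first access triggers the real import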
| 716 |
def A__ ( lowercase: int ) -> int:
if not isinstance(lowercase, lowercase ) or number < 0:
raise ValueError('Input must be a non-negative integer' )
A : Any =0
while number:
# This way we arrive at next set bit (next 1) instead of looping
# through each bit and checking for 1s hence the
# loop won't run 32 times it will only run the number of `1` times
number &= number - 1
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 661 | 0 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Union[str, Any]:
A : Any =FlaxMTaForConditionalGeneration.from_pretrained('google/mt5-small' )
A : Dict =AutoTokenizer.from_pretrained('google/mt5-small' )
A : str =tokenizer('Hello there' , return_tensors='np' ).input_ids
A : Optional[Any] =tokenizer('Hi I am' , return_tensors='np' ).input_ids
A : List[Any] =shift_tokens_right(SCREAMING_SNAKE_CASE__ , model.config.pad_token_id , model.config.decoder_start_token_id )
A : Optional[int] =model(SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ ).logits
A : str =optax.softmax_cross_entropy(SCREAMING_SNAKE_CASE__ , onehot(SCREAMING_SNAKE_CASE__ , logits.shape[-1] ) ).mean()
A : str =-(labels.shape[-1] * loss.item())
A : List[Any] =-84.91_27
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 717 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def A__ ( *lowercase: Tuple, lowercase: Optional[Union[Dict, Any]] = None, lowercase: Dict=True, lowercase: Any=2 ) -> List[Any]:
from .. import __version__
A : Optional[Any] =take_from
A : Union[str, Any] =()
if not isinstance(args[0], lowercase ):
A : List[str] =(args,)
for attribute, version_name, message in args:
if version.parse(version.parse(lowercase ).base_version ) >= version.parse(lowercase ):
raise ValueError(
F'The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''
F' version {__version__} is >= {version_name}' )
A : Tuple =None
if isinstance(lowercase, lowercase ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(lowercase ),)
A : Union[str, Any] =F'The `{attribute}` argument is deprecated and will be removed in version {version_name}.'
elif hasattr(lowercase, lowercase ):
values += (getattr(lowercase, lowercase ),)
A : Optional[Any] =F'The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'
elif deprecated_kwargs is None:
A : List[Any] =F'`{attribute}` is deprecated and will be removed in version {version_name}.'
if warning is not None:
A : List[Any] =warning + ' ' if standard_warn else ''
warnings.warn(warning + message, lowercase, stacklevel=lowercase )
if isinstance(lowercase, lowercase ) and len(lowercase ) > 0:
A : Any =inspect.getouterframes(inspect.currentframe() )[1]
A : int =call_frame.filename
A : int =call_frame.lineno
A : Optional[int] =call_frame.function
A , A : int =next(iter(deprecated_kwargs.items() ) )
raise TypeError(F'{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`' )
if len(lowercase ) == 0:
return
elif len(lowercase ) == 1:
return values[0]
return values
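# Usage sketch (assumes the untransformed diffusers signature
# `deprecate(*args, take_from=None, standard_warn=True, stacklevel=2)`; the
# names and version below are hypothetical): popping a renamed kwarg warns
# once and returns the legacy value, while any leftover keys in `take_from`
# raise a TypeError as in the helper above.
def resize(image, new_scale=None, **kwargs):
    old_scale = deprecate('scale', '1.0.0', 'Use `new_scale` instead.', take_from=kwargs)
    if old_scale is not None:
        new_scale = old_scale
    return image, new_scale  # actual resizing elided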
| 661 | 0 |
import mpmath # for roots of unity
import numpy as np
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None ) -> Tuple:
# Input as list
A : Optional[Any] =list(poly_a or [0] )[:]
A : int =list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
A : List[str] =len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
A : Optional[int] =len(self.polyB )
# Add 0 to make lengths equal a power of 2
A : List[Any] =int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
A : Optional[Any] =complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
A : List[Any] =self.__multiply()
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Tuple:
A : str =[[x] for x in self.polyA] if which == 'A' else [[x] for x in self.polyB]
# Corner case
if len(SCREAMING_SNAKE_CASE__ ) <= 1:
return dft[0]
#
A : List[str] =self.c_max_length // 2
while next_ncol > 0:
A : Any =[[] for i in range(SCREAMING_SNAKE_CASE__ )]
A : Optional[Any] =self.root**next_ncol
# First half of next step
A : Tuple =1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(SCREAMING_SNAKE_CASE__ ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
A : Optional[Any] =1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(SCREAMING_SNAKE_CASE__ ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
A : Dict =new_dft
A : Optional[int] =next_ncol // 2
return dft[0]
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Tuple:
A : List[Any] =self.__dft('A' )
A : Dict =self.__dft('B' )
A : Optional[Any] =[[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
A : Union[str, Any] =2
while next_ncol <= self.c_max_length:
A : Optional[int] =[[] for i in range(SCREAMING_SNAKE_CASE__ )]
A : Any =self.root ** (next_ncol // 2)
A : Union[str, Any] =1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
A : Any =new_inverse_c
next_ncol *= 2
# Unpack
A : List[Any] =[round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self : str ) -> int:
        A : List[Any] ='A = ' + ' + '.join(
            f'{coef}*x^{i}' for i, coef in enumerate(self.polyA[: self.len_A] ) )
        A : List[str] ='B = ' + ' + '.join(
            f'{coef}*x^{i}' for i, coef in enumerate(self.polyB[: self.len_B] ) )
        A : List[Any] ='A*B = ' + ' + '.join(
            f'{coef}*x^{i}' for i, coef in enumerate(self.product ) )
return f'{a}\n{b}\n{c}'
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
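# Usage sketch against the untransformed original (TheAlgorithms' `class FFT`,
# where the digit-mangled `np.loga` is `np.log2` and the name-mangled
# `self.__dft`/`self.__multiply` calls resolve): multiplying (1 + 2x + 3x^2)
# by (4 + 5x) gives 4 + 13x + 22x^2 + 15x^3, up to floating-point rounding
# (the product coefficients are stored as complex values).
# fft = FFT([1, 2, 3], [4, 5])
# print(fft)  # ... A*B = (4+0j)*x^0 + (13+0j)*x^1 + (22+0j)*x^2 + (15+0j)*x^3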
| 718 |
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def A__ ( lowercase: int, lowercase: str ) -> Dict:
assert isinstance(lowercase, lowercase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory', [False, True] )
def A__ ( lowercase: Dict, lowercase: Tuple, lowercase: str ) -> str:
A : Any =tmp_path / 'cache'
A : Dict ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A : Dict =JsonDatasetReader(lowercase, cache_dir=lowercase, keep_in_memory=lowercase ).read()
_check_json_dataset(lowercase, lowercase )
@pytest.mark.parametrize(
'features', [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
], )
def A__ ( lowercase: Optional[int], lowercase: Any, lowercase: Union[str, Any] ) -> Tuple:
A : Tuple =tmp_path / 'cache'
A : Optional[Any] ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A : Optional[Any] =features.copy() if features else default_expected_features
A : Union[str, Any] =(
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
A : str =JsonDatasetReader(lowercase, features=lowercase, cache_dir=lowercase ).read()
_check_json_dataset(lowercase, lowercase )
@pytest.mark.parametrize(
'features', [
None,
{'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'},
], )
def A__ ( lowercase: Optional[int], lowercase: str, lowercase: Dict ) -> Optional[int]:
A : int =tmp_path / 'cache'
A : Tuple ={'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'}
A : int =features.copy() if features else default_expected_features
A : str =(
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
A : Optional[int] =JsonDatasetReader(lowercase, features=lowercase, cache_dir=lowercase ).read()
assert isinstance(lowercase, lowercase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def A__ ( lowercase: Optional[Any], lowercase: str ) -> Tuple:
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
A : str ={'col_2': 'int64', 'col_3': 'float64', 'col_1': 'string'}
A : Dict =features.copy()
A : List[str] =(
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
A : int =tmp_path / 'cache'
A : Optional[int] =JsonDatasetReader(lowercase, features=lowercase, cache_dir=lowercase ).read()
assert isinstance(lowercase, lowercase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('split', [None, NamedSplit('train' ), 'train', 'test'] )
def A__ ( lowercase: Union[str, Any], lowercase: Any, lowercase: str ) -> Optional[Any]:
A : Optional[int] =tmp_path / 'cache'
A : Optional[Any] ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A : str =JsonDatasetReader(lowercase, cache_dir=lowercase, split=lowercase ).read()
_check_json_dataset(lowercase, lowercase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type', [str, list] )
def A__ ( lowercase: Optional[Any], lowercase: int, lowercase: Union[str, Any] ) -> List[Any]:
if issubclass(lowercase, lowercase ):
A : int =jsonl_path
elif issubclass(lowercase, lowercase ):
A : Any =[jsonl_path]
A : Optional[Any] =tmp_path / 'cache'
A : Tuple ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A : List[str] =JsonDatasetReader(lowercase, cache_dir=lowercase ).read()
_check_json_dataset(lowercase, lowercase )
def A__ ( lowercase: List[str], lowercase: Tuple, lowercase: Optional[Any]=("train",) ) -> Tuple:
assert isinstance(lowercase, lowercase )
for split in splits:
A : List[str] =dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory', [False, True] )
def A__ ( lowercase: Tuple, lowercase: Optional[int], lowercase: Any ) -> str:
A : List[str] =tmp_path / 'cache'
A : Union[str, Any] ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A : str =JsonDatasetReader({'train': jsonl_path}, cache_dir=lowercase, keep_in_memory=lowercase ).read()
_check_json_datasetdict(lowercase, lowercase )
@pytest.mark.parametrize(
'features', [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
], )
def A__ ( lowercase: Optional[int], lowercase: Optional[int], lowercase: Optional[int] ) -> Tuple:
A : Any =tmp_path / 'cache'
A : List[str] ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A : str =features.copy() if features else default_expected_features
A : Dict =(
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
A : Optional[Any] =JsonDatasetReader({'train': jsonl_path}, features=lowercase, cache_dir=lowercase ).read()
_check_json_datasetdict(lowercase, lowercase )
@pytest.mark.parametrize('split', [None, NamedSplit('train' ), 'train', 'test'] )
def A__ ( lowercase: Any, lowercase: List[Any], lowercase: List[Any] ) -> Tuple:
if split:
A : Optional[int] ={split: jsonl_path}
else:
A : Dict ='train'
A : Optional[Any] ={'train': jsonl_path, 'test': jsonl_path}
A : Tuple =tmp_path / 'cache'
A : List[str] ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A : List[Any] =JsonDatasetReader(lowercase, cache_dir=lowercase ).read()
_check_json_datasetdict(lowercase, lowercase, splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def A__ ( lowercase: List[Any] ) -> Tuple:
return json.load(lowercase )
def A__ ( lowercase: List[Any] ) -> Tuple:
return [json.loads(lowercase ) for line in buffer]
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Any:
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , lines=SCREAMING_SNAKE_CASE__ ).write()
buffer.seek(0 )
A : int =load_json_function(SCREAMING_SNAKE_CASE__ )
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert isinstance(exported_content[0] , SCREAMING_SNAKE_CASE__ )
assert len(SCREAMING_SNAKE_CASE__ ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any ) -> Optional[Any]:
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , lines=SCREAMING_SNAKE_CASE__ , orient=SCREAMING_SNAKE_CASE__ ).write()
buffer.seek(0 )
A : Any =load_json(SCREAMING_SNAKE_CASE__ )
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(SCREAMING_SNAKE_CASE__ , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(SCREAMING_SNAKE_CASE__ ) == 10
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[int]:
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , lines=SCREAMING_SNAKE_CASE__ , num_proc=2 ).write()
buffer.seek(0 )
A : int =load_json_function(SCREAMING_SNAKE_CASE__ )
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert isinstance(exported_content[0] , SCREAMING_SNAKE_CASE__ )
assert len(SCREAMING_SNAKE_CASE__ ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[Any]:
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , lines=SCREAMING_SNAKE_CASE__ , orient=SCREAMING_SNAKE_CASE__ , num_proc=2 ).write()
buffer.seek(0 )
A : List[Any] =load_json(SCREAMING_SNAKE_CASE__ )
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(SCREAMING_SNAKE_CASE__ , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(SCREAMING_SNAKE_CASE__ ) == 10
def SCREAMING_SNAKE_CASE_ ( self : str , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> List[Any]:
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , num_proc=0 )
@pytest.mark.parametrize('compression, extension' , [('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')] )
def SCREAMING_SNAKE_CASE_ ( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict ) -> str:
A : Union[str, Any] =tmp_path_factory.mktemp('data' ) / f'test.json.{extension}'
A : Union[str, Any] =str(shared_datadir / f'test_file.json.{extension}' )
JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , compression=SCREAMING_SNAKE_CASE__ ).write()
with fsspec.open(SCREAMING_SNAKE_CASE__ , 'rb' , compression='infer' ) as f:
A : str =f.read()
with fsspec.open(SCREAMING_SNAKE_CASE__ , 'rb' , compression='infer' ) as f:
A : List[str] =f.read()
assert exported_content == original_content
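# Minimal round-trip of the reader/writer exercised above (path illustrative):
from datasets import Dataset
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter

ds = Dataset.from_dict({'col_1': ['a', 'b'], 'col_2': [1, 2], 'col_3': [1.0, 2.0]})
JsonDatasetWriter(ds, 'roundtrip.jsonl', lines=True).write()
reloaded = JsonDatasetReader('roundtrip.jsonl').read()
assert reloaded.num_rows == 2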
| 661 | 0 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def A__ ( lowercase: Dict, lowercase: Union[str, Any] ) -> Optional[int]:
A : List[str] =[]
for part_id in partition_order:
A : Optional[Any] =df.where(F'SPARK_PARTITION_ID() = {part_id}' ).collect()
for row_idx, row in enumerate(lowercase ):
expected_row_ids_and_row_dicts.append((F'{part_id}_{row_idx}', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def A__ ( ) -> List[Any]:
A : Optional[int] =pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
A : Any =spark.range(100 ).repartition(1 )
A : List[str] =Spark(lowercase )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def A__ ( ) -> str:
A : List[Any] =pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
A : str =spark.range(10 ).repartition(2 )
A : Union[str, Any] =[1, 0]
A : Optional[Any] =_generate_iterable_examples(lowercase, lowercase ) # Reverse the partitions.
A : Dict =_get_expected_row_ids_and_row_dicts_for_partition_order(lowercase, lowercase )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
A : Any =expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def A__ ( ) -> Union[str, Any]:
A : Dict =pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
A : int =spark.range(10 ).repartition(1 )
A : str =SparkExamplesIterable(lowercase )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(lowercase ):
assert row_id == F'0_{i}'
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def A__ ( ) -> int:
A : str =pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
A : Union[str, Any] =spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('numpy.random.Generator' ) as generator_mock:
A : Dict =lambda lowercase : x.reverse()
A : List[str] =_get_expected_row_ids_and_row_dicts_for_partition_order(lowercase, [2, 1, 0] )
A : Tuple =SparkExamplesIterable(lowercase ).shuffle_data_sources(lowercase )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(lowercase ):
A : str =expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def A__ ( ) -> Optional[int]:
A : Optional[int] =pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
A : Dict =spark.range(20 ).repartition(4 )
# Partitions 0 and 2
A : Dict =SparkExamplesIterable(lowercase ).shard_data_sources(worker_id=0, num_workers=2 )
assert shard_it_a.n_shards == 2
A : Union[str, Any] =_get_expected_row_ids_and_row_dicts_for_partition_order(lowercase, [0, 2] )
for i, (row_id, row_dict) in enumerate(lowercase ):
A : Dict =expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
A : Any =SparkExamplesIterable(lowercase ).shard_data_sources(worker_id=1, num_workers=2 )
assert shard_it_a.n_shards == 2
A : Any =_get_expected_row_ids_and_row_dicts_for_partition_order(lowercase, [1, 3] )
for i, (row_id, row_dict) in enumerate(lowercase ):
A : str =expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def A__ ( ) -> Optional[int]:
A : Optional[Any] =pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
A : int =spark.range(100 ).repartition(1 )
A : Optional[Any] =Spark(lowercase )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
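# The sharding contract tested above assigns partitions round-robin: with 4
# partitions and 2 workers, worker 0 reads [0, 2] and worker 1 reads [1, 3].
# A standalone sketch of that mapping:
num_partitions, num_workers = 4, 2
shards = {w: list(range(w, num_partitions, num_workers)) for w in range(num_workers)}
assert shards == {0: [0, 2], 1: [1, 3]}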
| 719 |
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
lowercase : Optional[int] = DDIMPipeline
lowercase : int = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
lowercase : Optional[Any] = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"latents",
"callback",
"callback_steps",
}
lowercase : Optional[Any] = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
lowercase : Union[str, Any] = False
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Optional[int]:
torch.manual_seed(0 )
A : str =UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
A : Optional[int] =DDIMScheduler()
A : Optional[Any] ={'unet': unet, 'scheduler': scheduler}
return components
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any]=0 ) -> Any:
if str(SCREAMING_SNAKE_CASE__ ).startswith('mps' ):
A : List[Any] =torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
A : Union[str, Any] =torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
A : Optional[int] ={
'batch_size': 1,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> List[Any]:
A : Union[str, Any] ='cpu'
A : Tuple =self.get_dummy_components()
A : Union[str, Any] =self.pipeline_class(**SCREAMING_SNAKE_CASE__ )
pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
A : str =self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
A : str =pipe(**SCREAMING_SNAKE_CASE__ ).images
A : Optional[Any] =image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
A : Optional[Any] =np.array(
[1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04] )
A : str =np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(SCREAMING_SNAKE_CASE__ , 1e-3 )
def SCREAMING_SNAKE_CASE_ ( self : int ) -> Dict:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> List[Any]:
super().test_save_load_local(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Tuple:
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Tuple:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Dict:
A : Any ='google/ddpm-cifar10-32'
A : Optional[int] =UNetaDModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : Tuple =DDIMScheduler()
A : int =DDIMPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
ddim.to(SCREAMING_SNAKE_CASE__ )
ddim.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
A : Dict =torch.manual_seed(0 )
A : Optional[Any] =ddim(generator=SCREAMING_SNAKE_CASE__ , eta=0.0 , output_type='numpy' ).images
A : str =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A : Tuple =np.array([0.1_7_2_3, 0.1_6_1_7, 0.1_6_0_0, 0.1_6_2_6, 0.1_4_9_7, 0.1_5_1_3, 0.1_5_0_5, 0.1_4_4_2, 0.1_4_5_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> int:
A : Optional[int] ='google/ddpm-ema-bedroom-256'
A : str =UNetaDModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : str =DDIMScheduler.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : Tuple =DDIMPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
ddpm.to(SCREAMING_SNAKE_CASE__ )
ddpm.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
A : Any =torch.manual_seed(0 )
A : Optional[int] =ddpm(generator=SCREAMING_SNAKE_CASE__ , output_type='numpy' ).images
A : List[Any] =image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
A : Optional[int] =np.array([0.0_0_6_0, 0.0_2_0_1, 0.0_3_4_4, 0.0_0_2_4, 0.0_0_1_8, 0.0_0_0_2, 0.0_0_2_2, 0.0_0_0_0, 0.0_0_6_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
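# Minimal end-to-end sketch mirroring the slow CIFAR-10 test above (model id
# taken from the test; untransformed class names, so the row's `UNetaDModel`
# is diffusers' `UNet2DModel`):
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained('google/ddpm-cifar10-32')
pipe = DDIMPipeline(unet=unet, scheduler=DDIMScheduler())
image = pipe(generator=torch.manual_seed(0), eta=0.0, output_type='numpy').images[0]
print(image.shape)  # (32, 32, 3)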
| 661 | 0 |