from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE : List[Any] = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    SCREAMING_SNAKE_CASE["tokenization_xlnet"] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    SCREAMING_SNAKE_CASE["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    SCREAMING_SNAKE_CASE["modeling_xlnet"] = [
"XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLNetForMultipleChoice",
"XLNetForQuestionAnswering",
"XLNetForQuestionAnsweringSimple",
"XLNetForSequenceClassification",
"XLNetForTokenClassification",
"XLNetLMHeadModel",
"XLNetModel",
"XLNetPreTrainedModel",
"load_tf_weights_in_xlnet",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    SCREAMING_SNAKE_CASE["modeling_tf_xlnet"] = [
"TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLNetForMultipleChoice",
"TFXLNetForQuestionAnsweringSimple",
"TFXLNetForSequenceClassification",
"TFXLNetForTokenClassification",
"TFXLNetLMHeadModel",
"TFXLNetMainLayer",
"TFXLNetModel",
"TFXLNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], SCREAMING_SNAKE_CASE, module_spec=__spec__)
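# For illustration only: a minimal, generic sketch of the same lazy-import idea
# using just the standard library. This is NOT transformers' private _LazyModule;
# `lazy_import` below is a hypothetical helper.
import importlib


def lazy_import(module_name: str):
    """Return a proxy that imports `module_name` on first attribute access."""

    class _Proxy:
        def __getattr__(self, attr):
            module = importlib.import_module(module_name)
            return getattr(module, attr)

    return _Proxy()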
import math
import unittest
def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n)) time."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
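# Why the loop above steps by 6: every prime p > 3 satisfies p % 6 in (1, 5),
# since the other residues are divisible by 2 or 3. A quick illustrative check:
assert all(p % 6 in (1, 5) for p in range(5, 100) if is_prime(p))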
class __lowerCAmelCase(unittest.TestCase):
    def test_primes(self) -> None:
"""simple docstring"""
self.assertTrue(is_prime(2))
self.assertTrue(is_prime(3))
self.assertTrue(is_prime(5))
self.assertTrue(is_prime(7))
self.assertTrue(is_prime(11))
self.assertTrue(is_prime(13))
self.assertTrue(is_prime(17))
self.assertTrue(is_prime(19))
self.assertTrue(is_prime(23))
self.assertTrue(is_prime(29))
    def test_not_primes(self) -> None:
"""simple docstring"""
        with self.assertRaises(AssertionError):
            is_prime(-19)
self.assertFalse(
is_prime(0) , 'Zero doesn\'t have any positive factors, primes must have exactly two.' , )
self.assertFalse(
is_prime(1) , 'One only has 1 positive factor, primes must have exactly two.' , )
self.assertFalse(is_prime(2 * 2))
self.assertFalse(is_prime(2 * 3))
self.assertFalse(is_prime(3 * 3))
self.assertFalse(is_prime(3 * 5))
self.assertFalse(is_prime(3 * 5 * 7))
if __name__ == "__main__":
unittest.main()
'''simple docstring'''
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class DownBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownBlockaD  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
        super().test_output(expected_slice)


class ResnetDownsampleBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetDownsampleBlockaD  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
        super().test_output(expected_slice)


class AttnDownBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownBlockaD  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
        super().test_output(expected_slice)


class CrossAttnDownBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnDownBlockaD  # noqa F405
    block_type = "down"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
        super().test_output(expected_slice)


class SimpleCrossAttnDownBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnDownBlockaD  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
        super().test_output(expected_slice)


class SkipDownBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipDownBlockaD  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
        super().test_output(expected_slice)


class AttnSkipDownBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipDownBlockaD  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
        super().test_output(expected_slice)


class DownEncoderBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownEncoderBlockaD  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
        super().test_output(expected_slice)


class AttnDownEncoderBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownEncoderBlockaD  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
        super().test_output(expected_slice)


class UNetMidBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlockaD  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "temb_channels": 128,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
        super().test_output(expected_slice)


class UNetMidBlockaDCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlockaDCrossAttn  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
        super().test_output(expected_slice)


class UNetMidBlockaDSimpleCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlockaDSimpleCrossAttn  # noqa F405
    block_type = "mid"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
        super().test_output(expected_slice)


class UpBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
        super().test_output(expected_slice)


class ResnetUpsampleBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetUpsampleBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
        super().test_output(expected_slice)


class CrossAttnUpBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
        super().test_output(expected_slice)


class SimpleCrossAttnUpBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True, include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
        super().test_output(expected_slice)


class AttnUpBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
        super().test_output(expected_slice)


class SkipUpBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
        super().test_output(expected_slice)


class AttnSkipUpBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
        super().test_output(expected_slice)


class UpDecoderBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpDecoderBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
        super().test_output(expected_slice)


class AttnUpDecoderBlockaDTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpDecoderBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
        super().test_output(expected_slice)
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
UpperCAmelCase__ = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
UpperCAmelCase__ = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
UpperCAmelCase__ = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
    def _info(self) -> MetricInfo:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token') , id='sequence'),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token') , id='sequence') , id='references'),
}) , )
    def _compute(self, predictions: List[List[str]], references: List[List[List[str]]], min_len: int = 1, max_len: int = 4) -> Dict[str, float]:
        """simple docstring"""
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len)
        }
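if __name__ == "__main__":
    # Minimal, hypothetical sanity check of the GLEU idea using NLTK directly;
    # the sentences below are illustrative and not taken from the docstring above.
    hyp = "the cat sat on the mat".split()
    ref = "the cat is on the mat".split()
    print(gleu_score.sentence_gleu([ref], hyp))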
'''simple docstring'''
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SCREAMING_SNAKE_CASE(ModelMixin, ConfigMixin, ModuleUtilsMixin):
"""simple docstring"""
@register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ) -> None:
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size, d_model)
        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate)
        ta_config = TaConfig(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            self.encoders.append(TaBlock(ta_config))
        self.layer_norm = TaLayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)
        seq_length = encoder_input_tokens.shape[1]
        input_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(input_positions)
        x = self.dropout_pre(x)
        # invert the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)
        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)
        return self.dropout_post(x), encoder_inputs_mask
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
MTaTokenizerFast = TaTokenizerFast
UpperCAmelCase__ = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    UpperCAmelCase__["modeling_mt5"] = [
"MT5EncoderModel",
"MT5ForConditionalGeneration",
"MT5ForQuestionAnswering",
"MT5Model",
"MT5PreTrainedModel",
"MT5Stack",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    UpperCAmelCase__["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    UpperCAmelCase__["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        UpperCAmelCase__,
        extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast},
        module_spec=__spec__,
    )
def solution(n: int = 100) -> int:
    """Returns the difference between the square of the sum and the sum of the squares of the first n natural numbers."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    square_of_sum = (n * (n + 1) // 2) ** 2
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F'{solution() = }')
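    # Illustrative cross-check of the closed forms against brute force (small n):
    n = 10
    brute = sum(range(1, n + 1)) ** 2 - sum(i * i for i in range(1, n + 1))
    assert brute == solution(n) == 2640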
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class __lowerCAmelCase(PretrainedConfig):
    model_type = "open-llama"
    def __init__(
        self,
        vocab_size=100_000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ) -> None:
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # the misspelled kwarg key below mirrors the legacy
        # 'use_memorry_efficient_attention' key used in the original source
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
    def _rope_scaling_validation(self) -> None:
"""simple docstring"""
if self.rope_scaling is None:
return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                F"got {self.rope_scaling}")
        rope_scaling_type = self.rope_scaling.get('type', None)
        rope_scaling_factor = self.rope_scaling.get('factor', None)
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
                F"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(F"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
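if __name__ == "__main__":
    # Illustrative only (hypothetical values): a well-formed `rope_scaling` dict
    # passes validation, while e.g. {"type": "cubic", "factor": 2.0} would raise.
    _ = __lowerCAmelCase(rope_scaling={"type": "linear", "factor": 2.0})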
"""simple docstring"""
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
if is_sentencepiece_available():
import sentencepiece as sp
FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_(TokenizerTesterMixin, unittest.TestCase):
"""simple docstring"""
    tokenizer_class = SpeechaTextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self) -> None:
        """simple docstring"""
        super().setUp()
        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_VOCAB)
        vocab = ["<s>", "<pad>", "</s>", "<unk>"]
        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["spm_file"])
        tokenizer = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self) -> None:
        """simple docstring"""
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self) -> None:
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1001)
    def test_vocab_size(self) -> None:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 10_01 )
    def test_full_tokenizer(self) -> None:
        """simple docstring"""
        tokenizer = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [289, 50, 14, 174, 386],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
@slow
    def test_tokenizer_integration(self) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = {"""input_ids""": [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE__ , model_name="""facebook/s2t-small-mustc-en-de-st""" , revision="""a14f04cf0776c02f62a8cb800cf7909e15ea23ad""" , )
@require_sentencepiece
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
    checkpoint_name = "valhalla/s2t_mustc_multilinguial_medium"
    french_text = "C'est trop cool"
    spanish_text = "Esto es genial"
@classmethod
    def setUpClass(cls):
        """simple docstring"""
        cls.tokenizer: SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls
    def test_lang_code_to_id(self) -> None:
"""simple docstring"""
self.assertEqual(self.tokenizer.lang_code_to_id["""pt"""] , 4 )
self.assertEqual(self.tokenizer.lang_code_to_id["""ru"""] , 6 )
self.assertEqual(self.tokenizer.lang_code_to_id["""it"""] , 9 )
self.assertEqual(self.tokenizer.lang_code_to_id["""de"""] , 11 )
    def test_vocab_size(self) -> None:
"""simple docstring"""
self.assertEqual(self.tokenizer.vocab_size , 1_00_00 )
    def test_tokenizer_decode_ignores_language_codes(self) -> None:
        """simple docstring"""
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_tokenizer_adds_special_tokens(self) -> None:
        """simple docstring"""
        self.tokenizer.tgt_lang = "fr"
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)
    def test_tgt_lang_setter(self) -> None:
        """simple docstring"""
        self.tokenizer.tgt_lang = "fr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])
        self.tokenizer.tgt_lang = "es"
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
def validate_initial_digits(credit_card_number: str) -> bool:
    """Validates that the credit card number starts with a valid prefix."""
    return credit_card_number.startswith(('34', '35', '37', '4', '5', '6'))
def luhn_validation(credit_card_number: str) -> bool:
    """Checks whether the credit card number passes the Luhn checksum."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9 (e.g., 6 x 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0
def validate_credit_card_number(credit_card_number: str) -> bool:
    """Runs all validation checks and prints the outcome."""
    error_message = F"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(F"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(F"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(F"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(F"{error_message} it fails the Luhn check.")
        return False
    print(F"{credit_card_number} is a valid credit card number.")
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("4111111111111111")
validate_credit_card_number("32323")
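    # Illustrative check of the digit-doubling shortcut used in luhn_validation:
    # summing the digits of 2 * d equals subtracting 9 whenever 2 * d > 9.
    for digit in range(10):
        doubled = 2 * digit
        assert (doubled - 9 if doubled > 9 else doubled) == sum(int(c) for c in str(doubled))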
import torch
from diffusers import DiffusionPipeline
class lowercase(DiffusionPipeline):
    def __init__(self, unet, scheduler) -> None:
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        sample = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1
        model_output = self.unet(sample, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, sample).prev_sample
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)
        return result
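if __name__ == "__main__":
    # Illustrative usage with small, hypothetical model sizes; assumes diffusers'
    # UNet2DModel and DDPMScheduler can stand in for the unet/scheduler arguments.
    from diffusers import DDPMScheduler, UNet2DModel

    tiny_unet = UNet2DModel(
        sample_size=8,
        in_channels=3,
        out_channels=3,
        layers_per_block=1,
        block_out_channels=(32, 64),
        down_block_types=("DownBlock2D", "AttnDownBlock2D"),
        up_block_types=("AttnUpBlock2D", "UpBlock2D"),
    )
    pipe = lowercase(tiny_unet, DDPMScheduler())
    print(pipe().shape)  # a tensor of ones with shape (1, 3, 8, 8)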
from functools import reduce
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def solution(n: str = N) -> int:
    """Returns the greatest product of thirteen adjacent digits in the series."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12))
if __name__ == "__main__":
print(f"""{solution() = }""")
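    # Illustrative: the same sliding-window idea with a 2-digit window over "12345"
    # picks 4 * 5 = 20 as the largest adjacent product.
    assert max(int(a) * int(b) for a, b in zip("12345", "12345"[1:])) == 20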
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/bigbird-roberta-base': 40_96,
'google/bigbird-roberta-large': 40_96,
'google/bigbird-base-trivia-itc': 40_96,
}
class __UpperCamelCase(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        sp_model_kwargs=None,
        **kwargs,
    ):
        '''simple docstring'''
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            mask_token=mask_token,
            cls_token=cls_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        '''simple docstring'''
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text):
        '''simple docstring'''
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        '''simple docstring'''
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        '''simple docstring'''
        token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens):
        '''simple docstring'''
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def _decode(
        self,
        token_ids,
        skip_special_tokens=False,
        clean_up_tokenization_spaces=None,
        spaces_between_special_tokens=True,
        **kwargs,
    ):
        '''simple docstring'''
        self._decode_use_source_tokenizer = kwargs.pop('use_source_tokenizer', False)
        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))
        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r' (\[(MASK|SEP)\])', r'\1', ' '.join(sub_texts))
        else:
            text = ''.join(sub_texts)
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def save_vocabulary(self, save_directory, filename_prefix=None):
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solves the augmented system via Gaussian elimination with partial pivoting."""
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio
        row += 1
        col += 1
    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
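# Illustrative check on a 2x2 system (hypothetical values):
# x + 2y = 5 and 3x + 4y = 11 give x = 1, y = 2.
assert solve([[1, 2], [3, 4]], [[5], [11]]) == [[1.0], [2.0]]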
def interpolate(y_points: list[int]) -> Callable[[int], int]:
    """Returns a polynomial (as a callable) fitted through the given values at x = 1, 2, ..."""
    size: int = len(y_points)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int
    for x_val, y_val in enumerate(y_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size))

    return interpolated_func
def question_function(variable: int) -> int:
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sums the first incorrect terms (FITs) of the bad optimum polynomials (BOPs)."""
    data_points = [func(x_val) for x_val in range(1, order + 1)]
    polynomials = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
print(f"""{solution() = }""")
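    # Illustrative (hypothetical data): fitting the first three cubes with a
    # quadratic reproduces 1, 8, 27 but first disagrees at n = 4 (58, not 64).
    quadratic = interpolate([n**3 for n in range(1, 4)])
    assert [quadratic(n) for n in range(1, 5)] == [1, 8, 27, 58]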
'''simple docstring'''
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        """simple docstring"""
        example_video_filepath = hf_hub_download(
            repo_id='nateraw/video-demo', filename='archery.mp4', repo_type='dataset')
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            'https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4',
        ]
        return video_classifier, examples
    def run_pipeline_test(self, video_classifier, examples):
        """simple docstring"""
        for example in examples:
            outputs = video_classifier(example)
            self.assertEqual(
                outputs, [
                    {'score': ANY(float), 'label': ANY(str)},
                    {'score': ANY(float), 'label': ANY(str)},
                ], )
@require_torch
def A ( self : Any ):
"""simple docstring"""
UpperCamelCase = 'hf-internal-testing/tiny-random-VideoMAEForVideoClassification'
UpperCamelCase = VideoMAEFeatureExtractor(
size={'shortest_edge': 1_0} , crop_size={'height': 1_0, 'width': 1_0} )
UpperCamelCase = pipeline(
'video-classification' , model=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , frame_sampling_rate=4 )
UpperCamelCase = hf_hub_download(repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' )
UpperCamelCase = video_classifier(UpperCamelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [{'score': 0.5_1_9_9, 'label': 'LABEL_0'}, {'score': 0.4_8_0_1, 'label': 'LABEL_1'}] , )
UpperCamelCase = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
[{'score': 0.5_1_9_9, 'label': 'LABEL_0'}, {'score': 0.4_8_0_1, 'label': 'LABEL_1'}],
[{'score': 0.5_1_9_9, 'label': 'LABEL_0'}, {'score': 0.4_8_0_1, 'label': 'LABEL_1'}],
] , )
@require_tf
def A ( self : List[Any] ):
"""simple docstring"""
pass
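# Illustrative usage sketch (added; not part of the original test file):
# outside the harness, the same tiny checkpoint can be driven directly:
#   classifier = pipeline('video-classification',
#                         model='hf-internal-testing/tiny-random-VideoMAEForVideoClassification')
#   classifier('archery.mp4', top_k=2)  # -> [{'score': ..., 'label': ...}, ...]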
| 28 |
from __future__ import annotations
def has_no_duplicates(nums: list[int]) -> bool:
    """Return True if all elements of the list are unique.
    >>> has_no_duplicates([1, 2, 3])
    True
    >>> has_no_duplicates([1, 2, 2])
    False
    """
    return len(set(nums)) == len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 339 | 0 |
from __future__ import annotations
def encode(plain: str) -> list[int]:
    """Map each lowercase letter to its 1-based position in the alphabet."""
    return [ord(elem) - 96 for elem in plain]
def decode(encoded: list[int]) -> str:
    """Inverse of `encode`: map positions back to lowercase letters."""
    return ''.join(chr(elem + 96) for elem in encoded)
def main() -> None:
    encoded = encode(input('-> ').strip().lower())
    print('Encoded: ', encoded)
    print('Decoded:', decode(encoded))
if __name__ == "__main__":
    main()
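# Example session (added for illustration):
# >>> encode('hello')
# [8, 5, 12, 12, 15]
# >>> decode([8, 5, 12, 12, 15])
# 'hello'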
| 29 |
import os
UpperCAmelCase__ = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
def parse_roman_numerals(numerals: str) -> int:
    """Convert a roman numeral string to an integer."""
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals(num: int) -> str:
    """Generate the minimal roman numeral string for the given integer."""
    numerals = ""
    m_count = num // 1_000
    numerals += m_count * "M"
    num %= 1_000
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Return the total number of characters saved across the input file."""
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)
    return savings
if __name__ == "__main__":
print(f"""{solution() = }""")
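# Worked example (added): each line's saving is the length difference between
# the original numeral and its minimal form, e.g.
# >>> parse_roman_numerals('XIIIIII')
# 16
# >>> generate_roman_numerals(16)
# 'XVI'
# which saves len('XIIIIII') - len('XVI') == 4 characters.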
| 339 | 0 |
import unittest
from diffusers.models.unet_2d_blocks import *  # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownBlock2D  # noqa F405
    block_type = "down"
    def test_output(self):
        expected_slice = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
        super().test_output(expected_slice)
class ResnetDownsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetDownsampleBlock2D  # noqa F405
    block_type = "down"
    def test_output(self):
        expected_slice = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
        super().test_output(expected_slice)
class AttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownBlock2D  # noqa F405
    block_type = "down"
    def test_output(self):
        expected_slice = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
        super().test_output(expected_slice)
class CrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnDownBlock2D  # noqa F405
    block_type = "down"
    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict
    def test_output(self):
        expected_slice = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
        super().test_output(expected_slice)
class SimpleCrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnDownBlock2D  # noqa F405
    block_type = "down"
    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict
    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
        super().test_output(expected_slice)
class SkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipDownBlock2D  # noqa F405
    block_type = "down"
    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)
    def test_output(self):
        expected_slice = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
        super().test_output(expected_slice)
class AttnSkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipDownBlock2D  # noqa F405
    block_type = "down"
    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)
    def test_output(self):
        expected_slice = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
        super().test_output(expected_slice)
class DownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownEncoderBlock2D  # noqa F405
    block_type = "down"
    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_output(self):
        expected_slice = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
        super().test_output(expected_slice)
class AttnDownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownEncoderBlock2D  # noqa F405
    block_type = "down"
    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_output(self):
        expected_slice = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
        super().test_output(expected_slice)
class UNetMidBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2D  # noqa F405
    block_type = "mid"
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "temb_channels": 128,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_output(self):
        expected_slice = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
        super().test_output(expected_slice)
class UNetMidBlock2DCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DCrossAttn  # noqa F405
    block_type = "mid"
    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict
    def test_output(self):
        expected_slice = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
        super().test_output(expected_slice)
class UNetMidBlock2DSimpleCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DSimpleCrossAttn  # noqa F405
    block_type = "mid"
    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict
    def test_output(self):
        expected_slice = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
        super().test_output(expected_slice)
class UpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpBlock2D  # noqa F405
    block_type = "up"
    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)
    def test_output(self):
        expected_slice = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
        super().test_output(expected_slice)
class ResnetUpsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetUpsampleBlock2D  # noqa F405
    block_type = "up"
    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)
    def test_output(self):
        expected_slice = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
        super().test_output(expected_slice)
class CrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnUpBlock2D  # noqa F405
    block_type = "up"
    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict
    def test_output(self):
        expected_slice = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
        super().test_output(expected_slice)
class SimpleCrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnUpBlock2D  # noqa F405
    block_type = "up"
    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True, include_encoder_hidden_states=True)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict
    def test_output(self):
        expected_slice = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
        super().test_output(expected_slice)
class AttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpBlock2D  # noqa F405
    block_type = "up"
    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)
    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
        super().test_output(expected_slice)
class SkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipUpBlock2D  # noqa F405
    block_type = "up"
    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)
    def test_output(self):
        expected_slice = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
        super().test_output(expected_slice)
class AttnSkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipUpBlock2D  # noqa F405
    block_type = "up"
    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)
    def test_output(self):
        expected_slice = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
        super().test_output(expected_slice)
class UpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpDecoderBlock2D  # noqa F405
    block_type = "up"
    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_output(self):
        expected_slice = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
        super().test_output(expected_slice)
class AttnUpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpDecoderBlock2D  # noqa F405
    block_type = "up"
    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_output(self):
        expected_slice = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
        super().test_output(expected_slice)
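# Note (added; an assumption about the shared mixin, not its real source):
# UNetBlockTesterMixin.test_output presumably instantiates the block from
# prepare_init_args_and_inputs_for_common(), runs a forward pass on
# dummy_input, and compares a fixed slice of the output tensor against
# expected_slice within a small tolerance, roughly:
#     output = block(**inputs_dict)
#     output_slice = output[0, -1, -3:, -3:].flatten()
#     assert torch.allclose(output_slice, torch.tensor(expected_slice), atol=5e-3)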
| 30 |
import requests
from bs4 import BeautifulSoup
def get_citation(base_url: str, params: dict) -> str:
    """Return the 'Cited by ...' anchor text for a Google Scholar lookup."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, 'html.parser')
    div = soup.find('div', attrs={'class': 'gs_ri'})
    anchors = div.find('div', attrs={'class': 'gs_fl'}).find_all('a')
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 339 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
        "GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXForCausalLM",
        "GPTNeoXForQuestionAnswering",
        "GPTNeoXForSequenceClassification",
        "GPTNeoXForTokenClassification",
        "GPTNeoXLayer",
        "GPTNeoXModel",
        "GPTNeoXPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
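# Illustrative usage sketch (added; not part of the module): with the lazy
# structure above, importing the package stays cheap and the heavy submodule
# is only loaded on first attribute access, e.g.
# >>> from transformers import GPTNeoXConfig
# >>> config = GPTNeoXConfig(num_hidden_layers=2)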
| 31 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('junnyu/roformer_chinese_small', from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base')
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]])
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 339 | 0 |
def different_signs(num1: int, num2: int) -> bool:
    """Return True when the two integers have opposite signs (sign-bit XOR).
    >>> different_signs(1, -1)
    True
    >>> different_signs(1, 1)
    False
    """
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 32 |
cache = {}
def _calculate(days: int, absent: int, late: int) -> int:
    """
    Count the possible prize strings for the remaining number of days, given
    the total absences so far and the current run of consecutive late days.
    """
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings
def solution(days: int = 30) -> int:
    """Return the number of prize strings over a period of `days` days."""
    return _calculate(days, absent=0, late=0)
if __name__ == "__main__":
print(solution())
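# Worked example (added): for a 4-day period there are 3**4 = 81 trinary
# strings, of which exactly 43 earn a prize -- the figure quoted in the
# Project Euler 191 statement, reproduced by the memoised recursion above.
# >>> solution(4)
# 43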
| 339 | 0 |
"""simple docstring"""
def sum_of_proper_divisors(input_num: int) -> int:
    """Return the sum of all proper divisors of a positive integer.
    >>> sum_of_proper_divisors(28)
    28
    >>> sum_of_proper_divisors(12)
    16
    """
    if not isinstance(input_num, int):
        raise ValueError('''Input must be an integer''' )
    if input_num <= 0:
        raise ValueError('''Input must be positive''' )
    return sum(
        divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 33 |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend('    if not is_torch_available():')
        self.assertEqual(simple_backend, 'torch')
        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")
        double_backend = find_backend('    if not (is_torch_available() and is_transformers_available()):')
        self.assertEqual(double_backend, 'torch_and_transformers')
        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        triple_backend = find_backend(
            '    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):')
        self.assertEqual(triple_backend, 'torch_and_transformers_and_onnx')
    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn('torch', objects)
        self.assertIn('torch_and_transformers', objects)
        self.assertIn('flax_and_transformers', objects)
        self.assertIn('torch_and_transformers_and_onnx', objects)
        # Likewise, we can't assert on the exact content of a key
        self.assertIn('UNet2DModel', objects['torch'])
        self.assertIn('FlaxUNet2DConditionModel', objects['flax'])
        self.assertIn('StableDiffusionPipeline', objects['torch_and_transformers'])
        self.assertIn('FlaxStableDiffusionPipeline', objects['flax_and_transformers'])
        self.assertIn('LMSDiscreteScheduler', objects['torch_and_scipy'])
        self.assertIn('OnnxStableDiffusionPipeline', objects['torch_and_transformers_and_onnx'])
    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object('CONSTANT', '\'torch\'')
        self.assertEqual(dummy_constant, '\nCONSTANT = None\n')
        dummy_function = create_dummy_object('function', '\'torch\'')
        self.assertEqual(
            dummy_function, '\ndef function(*args, **kwargs):\n    requires_backends(function, \'torch\')\n')
        expected_dummy_class = '\nclass FakeClass(metaclass=DummyObject):\n    _backends = \'torch\'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, \'torch\')\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, \'torch\')\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, \'torch\')\n'
        dummy_class = create_dummy_object('FakeClass', '\'torch\'')
        self.assertEqual(dummy_class, expected_dummy_class)
    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = ["torch"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, ["torch"])\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, ["torch"])\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, ["torch"])\n'
        dummy_files = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']})
        self.assertEqual(dummy_files['torch'], expected_dummy_pytorch_file)
| 339 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/mobilenet_v2_1.4_224': 'https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json',
    'google/mobilenet_v2_1.0_224': 'https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json',
    'google/mobilenet_v2_0.75_160': 'https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json',
    'google/mobilenet_v2_0.35_96': 'https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json',
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    model_type = 'mobilenet_v2'
    def __init__(self, num_channels=3, image_size=224, depth_multiplier=1.0, depth_divisible_by=8, min_depth=8, expand_ratio=6, output_stride=32, first_layer_is_expansion=True, finegrained_output=True, hidden_act='relu6', tf_padding=True, classifier_dropout_prob=0.8, initializer_range=0.02, layer_norm_eps=0.001, semantic_loss_ignore_index=255, **kwargs, ):
        super().__init__(**kwargs)
        if depth_multiplier <= 0:
            raise ValueError('depth_multiplier must be greater than zero.')
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([('pixel_values', {0: 'batch'})])
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == 'image-classification':
            return OrderedDict([('logits', {0: 'batch'})])
        else:
            return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})])
    @property
    def atol_for_validation(self) -> float:
        return 1e-4
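# Illustrative usage sketch (added; assumes the cleaned-up class name above):
# >>> config = MobileNetV2Config(depth_multiplier=1.0, image_size=224)
# >>> config.hidden_act
# 'relu6'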
| 34 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default='tab_fact', metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
    dataset_config_name: Optional[str] = field(
        default='tab_fact', metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}, )
    max_seq_length: int = field(
        default=1024, metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
    pad_to_max_length: bool = field(
        default=False, metadata={
            'help': (
                'Whether to pad all samples to `max_seq_length`. '
                'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
            )
        }, )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        }, )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        }, )
    max_predict_samples: Optional[int] = field(
        default=None, metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of prediction examples to this '
                'value if set.'
            )
        }, )
    train_file: Optional[str] = field(
        default=None, metadata={'help': 'A csv or a json file containing the training data.'} )
    validation_file: Optional[str] = field(
        default=None, metadata={'help': 'A csv or a json file containing the validation data.'} )
    test_file: Optional[str] = field(default=None, metadata={'help': 'A csv or a json file containing the test data.'} )
    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.')
        else:
            train_extension = self.train_file.split('.')[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split('.')[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
    use_fast_tokenizer: bool = field(
        default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'}, )
    model_revision: str = field(
        default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}, )
    use_auth_token: bool = field(
        default=False, metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        }, )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
        data_files = {'train': data_args.train_file, 'validation': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
                train_extension = data_args.train_file.split('.' )[-1]
                test_extension = data_args.test_file.split('.' )[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files['test'] = data_args.test_file
else:
raise ValueError('Need either a GLUE task or a test file for `do_predict`.' )
for key in data_files.keys():
logger.info(F"load a local file for {key}: {data_files[key]}" )
if data_args.train_file.endswith('.csv' ):
# Loading a dataset from local csv files
            raw_datasets = load_dataset('csv' , data_files=data_files , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
            raw_datasets = load_dataset('json' , data_files=data_files , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
    label_list = raw_datasets['train'].features['label'].names
    num_labels = len(label_list )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=True , )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = 'max_length'
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
    # Some models have set the order of the labels to use, so let's make sure we do use it.
    label_to_id = {'Refused': 0, 'Entailed': 1}
    id_to_label = {0: 'Refused', 1: 'Entailed'}
    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            F"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
            F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
    max_seq_length = min(data_args.max_seq_length , tokenizer.model_max_length )
    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            _table_content = [_table_row.split('#' ) for _table_row in _table_text.strip('\n' ).split('\n' )]
            _table_pd = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
            return _table_pd
        questions = examples['statement']
        tables = list(map(_convert_table_text_to_pandas , examples['table_text'] ) )
        result = tokenizer(tables , questions , padding=padding , max_length=max_seq_length , truncation=True )
        result['label'] = examples['label']
        return result
with training_args.main_process_first(desc='dataset map pre-processing' ):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on dataset' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
        train_dataset = raw_datasets['train']
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
        eval_dataset = raw_datasets['validation']
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('--do_predict requires a test dataset' )
        predict_dataset = raw_datasets['test']
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
        for index in random.sample(range(len(train_dataset ) ) , 3 ):
            logger.info(F"Sample {index} of the training set: {train_dataset[index]}." )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions , tuple ) else p.predictions
        preds = np.argmax(preds , axis=1 )
        return {"accuracy": (preds == p.label_ids).astype(np.float64 ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 )
    else:
        data_collator = None
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=tokenizer , data_collator=data_collator , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics['train_samples'] = min(max_train_samples , len(train_dataset ) )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics('train' , metrics )
        trainer.save_metrics('train' , metrics )
        trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
        metrics = trainer.evaluate(eval_dataset=eval_dataset )
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
        metrics['eval_samples'] = min(max_eval_samples , len(eval_dataset ) )
        trainer.log_metrics('eval' , metrics )
        trainer.save_metrics('eval' , metrics )
if training_args.do_predict:
logger.info('*** Predict ***' )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns('label' )
        predictions = trainer.predict(predict_dataset , metric_key_prefix='predict' ).predictions
        predictions = np.argmax(predictions , axis=1 )
        output_predict_file = os.path.join(training_args.output_dir , 'predict_results_tabfact.txt' )
        if trainer.is_world_process_zero():
            with open(output_predict_file , 'w' ) as writer:
                logger.info('***** Predict Results *****' )
                writer.write('index\tprediction\n' )
                for index, item in enumerate(predictions ):
                    item = label_list[item]
                    writer.write(F"{index}\t{item}\n" )
    kwargs = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
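# Example invocation (added as an illustration; the script file name and the
# checkpoint are assumptions, while the flags follow the dataclasses above):
# python run_tabfact.py \
#     --model_name_or_path microsoft/tapex-base \
#     --dataset_name tab_fact \
#     --do_train --do_eval \
#     --per_device_train_batch_size 8 \
#     --learning_rate 3e-5 \
#     --output_dir ./tapex-tabfact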
| 339 | 0 |
'''simple docstring'''
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__ ).base_version
    if version.parse(accelerate_version ) < version.parse("""0.17.0""" ):
        return method
    def wrapper(self , *args , **kwargs ):
        if hasattr(self , """_hf_hook""" ) and hasattr(self._hf_hook , """pre_forward""" ):
            self._hf_hook.pre_forward(self )
        return method(self , *args , **kwargs )
    return wrapper
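# Illustrative usage sketch (added): the decorator is intended for methods of
# modules that may be dispatched with accelerate hooks, so a `_hf_hook`
# pre-forward callback (if present) runs before the method body, e.g.
#
#     class MyAutoencoder(torch.nn.Module):
#         @apply_forward_hook
#         def encode(self, x):
#             return self.encoder(x)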
| 35 |
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program, timeout, task_id, completion_id):
    """
    Evaluates the functional correctness of a completion by running the test
    suite provided in the problem.
    """
    manager = multiprocessing.Manager()
    result = manager.list()
    p = multiprocessing.Process(target=unsafe_execute , args=(check_program, result, timeout) )
    p.start()
    p.join(timeout=timeout + 1 )
    if p.is_alive():
        p.kill()
    if not result:
        result.append('timed out' )
    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
def unsafe_execute(check_program, result, timeout):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil
        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir
        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()
        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout ):
                    exec(check_program , exec_globals )
            result.append('passed' )
        except TimeoutException:
            result.append('timed out' )
        except BaseException as e:
            result.append(F"failed: {e}" )
        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum , frame ):
        raise TimeoutException('Timed out!' )
    signal.setitimer(signal.ITIMER_REAL , seconds )
    signal.signal(signal.SIGALRM , signal_handler )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
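# Illustrative usage (added): any block that exceeds the budget is interrupted
# via SIGALRM and surfaces as a TimeoutException, e.g.
#
#     with time_limit(1.0):
#         exec(untrusted_snippet, {})  # raises TimeoutException after 1 second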
@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream ):
        with contextlib.redirect_stderr(stream ):
            with redirect_stdin(stream ):
                yield
@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname ):
            yield dirname
class TimeoutException(Exception):
    pass
class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it's read from."""
    def read(self , *args , **kwargs ):
        raise OSError
    def readline(self , *args , **kwargs ):
        raise OSError
    def readlines(self , *args , **kwargs ):
        raise OSError
    def readable(self , *args , **kwargs ):
        """Returns True if the IO object can be read."""
        return False
class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = 'stdin'
@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root )
try:
yield
except BaseException as exc:
raise exc
finally:
        os.chdir(cwd )
def reliability_guard(maximum_memory_bytes=None):
    """
    Disables various destructive functions and prevents the generated code
    from interfering with the test (e.g. fork bomb, killing other processes,
    removing filesystem files, etc.). WARNING: this is not a security sandbox;
    untrusted code should not be blindly executed outside of one.
    """
    if maximum_memory_bytes is not None:
        import resource
        resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
        resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
    faulthandler.disable()
    import builtins
    builtins.exit = None
    builtins.quit = None
import os
    os.environ['OMP_NUM_THREADS'] = '1'
    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.fchdir = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None
    import shutil
    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None
    import subprocess
    subprocess.Popen = None  # type: ignore
    __builtins__['help'] = None
    import sys
    sys.modules['ipdb'] = None
    sys.modules['joblib'] = None
    sys.modules['resource'] = None
    sys.modules['psutil'] = None
    sys.modules['tkinter'] = None
| 339 | 0 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock" ) )
    lock2 = FileLock(str(tmpdir / "foo.lock" ) )
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout ):
            _start = time.time()
            lock2.acquire(timeout )
            assert time.time() - _start > timeout
def test_long_path(tmpdir):
    filename = "a" * 1_000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename ) )
    assert lock1._lock_file.endswith(".lock" )
    assert not lock1._lock_file.endswith(filename )
    assert len(os.path.basename(lock1._lock_file ) ) <= 255
    lock2 = FileLock(tmpdir / filename )
    with lock1.acquire():
        with pytest.raises(Timeout ):
            lock2.acquire(0 )
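# Illustrative usage sketch (added; mirrors what the tests above exercise):
#
#     lock = FileLock('work.lock')
#     with lock.acquire(timeout=0.01):  # raises Timeout if already held
#         ...  # exclusive section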
| 36 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F"If set, {key} must be yes or no." )
    return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    """Decorator that marks a test as skipped."""
    return unittest.skip('Test was skipped')(test_case)
def slow(test_case):
    """Decorator marking a test as slow; slow tests only run when `RUN_SLOW=yes`."""
    return unittest.skipUnless(_run_slow_tests, 'test is slow')(test_case)
def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), 'test requires only a CPU')(test_case)
def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), 'test requires a GPU')(test_case)
def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), 'test requires a XPU')(test_case)
def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), 'test requires a `mps` backend support in `torch`')(test_case)
def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), 'test requires the Hugging Face suite')(test_case)
def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), 'test requires the bitsandbytes library')(test_case)
def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), 'test requires TPU')(test_case)
def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, 'test requires a GPU')(test_case)
def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, 'test requires a XPU')(test_case)
def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, 'test requires multiple GPUs')(test_case)
def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, 'test requires multiple XPUs')(test_case)
def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), 'test requires safetensors')(test_case)
def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), 'test requires DeepSpeed')(test_case)
def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version('>=', '1.12.0'), 'test requires torch version >= 1.12.0')(test_case)
def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version('>=', version), F"test requires torch version >= {version}")(test_case)
def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), 'test requires Tensorboard')(test_case)
def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), 'test requires wandb')(test_case)
def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), 'test requires comet_ml')(test_case)
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available, 'test requires at least one tracker to be available and for `comet_ml` to not be installed', )(test_case)
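# Illustrative usage sketch (not part of the original module): these decorators
# compose on plain ``unittest`` cases, so a test that should only run with
# ``RUN_SLOW=yes`` and at least two GPUs reads:
#
#   class BigModelTests(unittest.TestCase):
#       @slow
#       @require_multi_gpu
#       def test_data_parallel_forward(self):
#           ...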
class TempDirTestCase(unittest.TestCase):
    """Keeps one temporary directory alive for the whole class; its contents are wiped before each test unless `clear_on_setup` is False."""
    clear_on_setup = True
    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()
    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)
    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob('**/*'):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)
class AccelerateTestCase(unittest.TestCase):
    """Resets the accelerator state singletons after every test."""
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class MockingTestCase(unittest.TestCase):
    """Starts the given mocks on setup and registers their cleanup."""
    def setUp(self, mocks: Union[mock.Mock, List[mock.Mock]] = None):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
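# Illustrative usage sketch (not part of the original module): subclassing
# ``TempDirTestCase`` gives every test a shared, automatically cleaned scratch
# directory:
#
#   class CheckpointTests(TempDirTestCase):
#       def test_save(self):
#           path = os.path.join(self.tmpdir, "model.bin")
#           ...  # write and assert on ``path``; it is wiped before the next test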
def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print('\nRunning: ', ' '.join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env, )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line, sink, pipe, label=""):
        line = line.decode('utf-8').rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label='stdout:'))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label='stderr:'))),
        ], timeout=timeout, )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))
    cmd_str = ' '.join(cmd)
    if result.returncode > 0:
        stderr = '\n'.join(result.stderr)
        raise RuntimeError(
            F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            F"The combined stderr from workers follows:\n{stderr}")
    return result
class SubprocessCallException(Exception):
    pass
def run_command(command, return_stdout=False):
    """Runs `command` with subprocess, optionally returning stdout; surfaces failures as SubprocessCallException."""
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, 'decode'):
                output = output.decode('utf-8')
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            F"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}") from e
| 339 | 0 |
'''simple docstring'''
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        """Find any `open(...)` call that does not pass an encoding (or a binary mode)."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match
    def _no_print_statements(self, filepath: str):
        """Find any real `print(...)` call, ignoring commented-out or quoted ones."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None
    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")
    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
| 37 |
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Check whether ``n`` can be placed at ``grid[row][column]``."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the coordinates of the first empty (zero) cell, scanning row by row."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None
def sudoku(grid: Matrix) -> Matrix | None:
    """Solve the grid in place by backtracking; return it, or ``None`` if unsolvable."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
    return None
def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=' ')
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
    solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
| 339 | 0 |
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sort the list in place by exchanging out-of-order pairs, then return it."""
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
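# Quick check of the expected behaviour (illustrative):
#
#   >>> exchange_sort([5, 4, 3, 2, 1])
#   [1, 2, 3, 4, 5]
#   >>> exchange_sort([-1, 3, 0])
#   [-1, 0, 3]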
| 38 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
_CITATION = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
_DESCRIPTION = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
_KWARGS_DESCRIPTION = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n    gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n    'meteor': meteor score.\nExamples:\n\n    >>> meteor = datasets.load_metric('meteor')\n    >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n    >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n    >>> results = meteor.compute(predictions=predictions, references=references)\n    >>> print(round(results[\"meteor\"], 4))\n    0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Value('string', id='sequence'),
                }), codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'], reference_urls=[
                'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
                'https://en.wikipedia.org/wiki/METEOR',
            ], )
    def _download_and_prepare(self, dl_manager):
        import nltk
        nltk.download('wordnet')
        if NLTK_VERSION >= version.Version('3.6.5'):
            nltk.download('punkt')
        if NLTK_VERSION >= version.Version('3.6.6'):
            nltk.download('omw-1.4')
    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version('3.6.5'):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]
        return {"meteor": np.mean(scores)}
| 339 | 0 |
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 39 |
import argparse
import hashlib
import io
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
"tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
"tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
"base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
"base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
"small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
"small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
"medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
"medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
"large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
"large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}
def remove_ignore_keys_(state_dict):
    ignore_keys = ['layers', 'blocks']
    for k in ignore_keys:
        state_dict.pop(k, None)
WHISPER_MAPPING = {
"blocks": "layers",
"mlp.0": "fc1",
"mlp.2": "fc2",
"mlp_ln": "final_layer_norm",
".attn.query": ".self_attn.q_proj",
".attn.key": ".self_attn.k_proj",
".attn.value": ".self_attn.v_proj",
".attn_ln": ".self_attn_layer_norm",
".attn.out": ".self_attn.out_proj",
".cross_attn.query": ".encoder_attn.q_proj",
".cross_attn.key": ".encoder_attn.k_proj",
".cross_attn.value": ".encoder_attn.v_proj",
".cross_attn_ln": ".encoder_attn_layer_norm",
".cross_attn.out": ".encoder_attn.out_proj",
"decoder.ln.": "decoder.layer_norm.",
"encoder.ln.": "encoder.layer_norm.",
"token_embedding": "embed_tokens",
"encoder.positional_embedding": "encoder.embed_positions.weight",
"decoder.positional_embedding": "decoder.embed_positions.weight",
"ln_post": "layer_norm",
}
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)
        print(F"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)
    return s_dict
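# Illustrative example (not part of the original script) of what the mapping does:
# the OpenAI key 'decoder.blocks.0.mlp.0.weight' first has 'blocks' -> 'layers'
# and then 'mlp.0' -> 'fc1' applied, yielding 'decoder.layers.0.fc1.weight'.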
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
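# Note (added for clarity): the linear layer above shares its weight tensor with
# the token embedding, which is how the LM head is tied to the decoder
# embeddings when `tie_embeds` is True below.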
def _download(url: str, root: str = '.') -> bytes:
    # The default for `root` is an assumption added here so that the
    # single-argument call in `convert_openai_whisper_to_tfms` below works.
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split('/')[-2]
    download_target = os.path.join(root, filename)
    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(F"{download_target} exists and is not a regular file")
    if os.path.isfile(download_target):
        model_bytes = open(download_target, 'rb').read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(F"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
    with urllib.request.urlopen(url) as source, open(download_target, 'wb') as output:
        with tqdm(
            total=int(source.info().get('Content-Length')), ncols=80, unit='iB', unit_scale=True, unit_divisor=1024) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))
    model_bytes = open(download_target, 'rb').read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            'Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.')
    return model_bytes
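# Note (added for clarity): OpenAI encodes the expected SHA-256 digest as the
# second-to-last path segment of each checkpoint URL above, which is why the
# checksum is recovered with `url.split('/')[-2]`.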
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        # loading from the downloaded bytes via io.BytesIO keeps _download's
        # bytes return type consistent with this call site
        original_checkpoint = torch.load(io.BytesIO(_download(_MODELS[checkpoint_path])), map_location='cpu')
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location='cpu')
    dimensions = original_checkpoint['dims']
    state_dict = original_checkpoint['model_state_dict']
    proj_out_weights = state_dict['decoder.token_embedding.weight']
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict['decoder.layers.0.fc1.weight'].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions['n_vocab'], encoder_ffn_dim=ffn_dim, decoder_ffn_dim=ffn_dim, num_mel_bins=dimensions['n_mels'], d_model=dimensions['n_audio_state'], max_target_positions=dimensions['n_text_ctx'], encoder_layers=dimensions['n_audio_layer'], encoder_attention_heads=dimensions['n_audio_head'], decoder_layers=dimensions['n_text_layer'], decoder_attention_heads=dimensions['n_text_head'], max_source_positions=dimensions['n_audio_ctx'], )  # 'n_text_head' (not 'n_text_state') holds the decoder head count
    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
            F" but all the following weights are missing {missing}")
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Patht to the downloaded checkpoints")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 339 | 0 |
"""simple docstring"""
def bfs(graph, source, sink, parents):
    """Breadth-first search for an augmenting path; records the path in ``parents``."""
    visited = [False] * len(graph)
    queue = []
    queue.append(source)
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parents[ind] = u
    return visited[sink]
def ford_fulkerson(graph, source, sink):
    """Compute the maximum flow from ``source`` to ``sink`` on the residual graph."""
    parents = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parents):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parents[s]][s])
            s = parents[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parents[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parents[v]
    return max_flow
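# Note (added for clarity): because augmenting paths are found with BFS, this is
# the Edmonds-Karp variant of Ford-Fulkerson, running in O(V * E^2). For the
# sample graph below the expected output is 23.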
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
| 40 |
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """Builder Config for AudioFolder."""
    drop_labels: bool = None
    drop_metadata: bool = None
class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
AUDIO_EXTENSIONS = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
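# Illustrative usage sketch (not part of the builder itself): with audio files
# laid out as `data_dir/<label>/<clip>.wav`, the builder is invoked via
#
#   from datasets import load_dataset
#   ds = load_dataset("audiofolder", data_dir="/path/to/folder")
#   # ds["train"][0] -> {"audio": {...}, "label": ...}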
| 339 | 0 |
'''simple docstring'''
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    """Build a square matrix filled with 1..n*n, e.g. make_matrix(2) -> [[1, 2], [3, 4]]."""
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]
def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix by 90 degrees counterclockwise."""
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))
def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix by 180 degrees."""
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))
def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix by 270 degrees counterclockwise."""
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))
def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix
def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix
def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix
def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)
if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
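# Quick sanity checks (illustrative):
#
#   >>> rotate_90(make_matrix(2))
#   [[2, 4], [1, 3]]
#   >>> rotate_180(make_matrix(2))
#   [[4, 3], [2, 1]]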
| 41 |
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []
    def get_position(self, vertex):
        return self.node_position[vertex]
    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos
    def top_to_bottom(self, heap, start, size, positions):
        """Sift the element at ``start`` down to restore the min-heap property."""
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, tempa
                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start]))
                self.set_position(positions[start], temp)
                self.top_to_bottom(heap, smallest_child, size, positions)
    def bottom_to_top(self, val, index, heap, position):
        """Bubble ``val`` at ``index`` up towards the root."""
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)
    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)
    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    """Return the edges of a minimum spanning tree of ``adjacency_list``."""
    heap = Heap()
    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)
    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges
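# Quick check of the expected behaviour (illustrative): for a weighted triangle
#
#   >>> prisms_algorithm({0: [[1, 1], [2, 3]], 1: [[0, 1], [2, 2]], 2: [[0, 3], [1, 2]]})
#   [(0, 1), (1, 2)]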
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 339 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_blip": [
"BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlipConfig",
"BlipTextConfig",
"BlipVisionConfig",
],
"processing_blip": ["BlipProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlipModel",
"BlipPreTrainedModel",
"BlipForConditionalGeneration",
"BlipForQuestionAnswering",
"BlipVisionModel",
"BlipTextModel",
"BlipForImageTextRetrieval",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBlipModel",
"TFBlipPreTrainedModel",
"TFBlipForConditionalGeneration",
"TFBlipForQuestionAnswering",
"TFBlipVisionModel",
"TFBlipTextModel",
"TFBlipForImageTextRetrieval",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 42 |
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count('<mask>') == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = ' '.join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))])
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(' ')):
        predicted_token = predicted_token_bpe.replace('\u2581', ' ')
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(' {0}'.format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                ))
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                ))
    return topk_filled_outputs
tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()
masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
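# Each element returned by fill_mask is a (filled_sequence, probability, token)
# triple, so the call above prints the top-3 completions of the masked sentence.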
| 339 | 0 |
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11')
def onnx_export(model, model_args: tuple, output_path: Path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False, ):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset, )
    else:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset, )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device)
    output_path = Path(output_path)
    # TEXT ENCODER
    num_tokens = pipeline.text_encoder.config.max_position_embeddings
    text_hidden_size = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer(
        "A sample prompt", padding="max_length", max_length=pipeline.tokenizer.model_max_length, truncation=True, return_tensors="pt", )
    onnx_export(
        pipeline.text_encoder, model_args=(text_input.input_ids.to(device=device, dtype=torch.int32)), output_path=output_path / "text_encoder" / "model.onnx", ordered_input_names=["input_ids"], output_names=["last_hidden_state", "pooler_output"], dynamic_axes={
            "input_ids": {0: "batch", 1: "sequence"},
        }, opset=opset, )
    del pipeline.text_encoder
    # UNET
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / "unet" / "model.onnx"
    onnx_export(
        pipeline.unet, model_args=(
            torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            torch.randn(2).to(device=device, dtype=dtype),
            torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype),
            False,
        ), output_path=unet_path, ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"], output_names=["out_sample"], dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
            "timestep": {0: "batch"},
            "encoder_hidden_states": {0: "batch", 1: "sequence"},
        }, opset=opset, use_external_data_format=True, )
    unet_model_path = str(unet_path.absolute().as_posix())
    unet_dir = os.path.dirname(unet_model_path)
    unet = onnx.load(unet_model_path)
    # clean up existing tensor files
    shutil.rmtree(unet_dir)
    os.mkdir(unet_dir)
    # collate external tensor files into one
    onnx.save_model(
        unet, unet_model_path, save_as_external_data=True, all_tensors_to_one_file=True, location="weights.pb", convert_attribute=False, )
    del pipeline.unet
    # VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample, return_dict)[0].sample()
    onnx_export(
        vae_encoder, model_args=(
            torch.randn(1, vae_in_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ), output_path=output_path / "vae_encoder" / "model.onnx", ordered_input_names=["sample", "return_dict"], output_names=["latent_sample"], dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        }, opset=opset, )
    # VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    vae_out_channels = vae_decoder.config.out_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_encoder.decode
    onnx_export(
        vae_decoder, model_args=(
            torch.randn(1, vae_latent_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ), output_path=output_path / "vae_decoder" / "model.onnx", ordered_input_names=["latent_sample", "return_dict"], output_names=["sample"], dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        }, opset=opset, )
    del pipeline.vae
    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
        clip_num_channels = safety_checker.config.vision_config.num_channels
        clip_image_size = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker, model_args=(
                torch.randn(
                    1, clip_num_channels, clip_image_size, clip_image_size, ).to(device=device, dtype=dtype),
                torch.randn(1, vae_sample_size, vae_sample_size, vae_out_channels).to(device=device, dtype=dtype),
            ), output_path=output_path / "safety_checker" / "model.onnx", ordered_input_names=["clip_input", "images"], output_names=["out_images", "has_nsfw_concepts"], dynamic_axes={
                "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
                "images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
            }, opset=opset, )
        del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker")
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None
    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder"), vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder"), text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder"), tokenizer=pipeline.tokenizer, unet=OnnxRuntimeModel.from_pretrained(output_path / "unet"), scheduler=pipeline.scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, requires_safety_checker=safety_checker is not None, )
    onnx_pipeline.save_pretrained(output_path)
    print("ONNX pipeline saved to", output_path)
    del pipeline
    del onnx_pipeline
    _ = OnnxStableDiffusionPipeline.from_pretrained(output_path, provider="CPUExecutionProvider")
    print("ONNX pipeline is loadable")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_path''',
type=str,
required=True,
help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''',
)
parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--opset''',
default=14,
type=int,
help='''The version of the ONNX operator set to use.''',
)
parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''')
    args = parser.parse_args()
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
| 43 |
import math
import unittest
def is_prime(number: int) -> bool:
    """Check primality by trial division over candidates of the form 6k +/- 1."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
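# Quick check of the 6k +/- 1 trial division (illustrative):
#
#   >>> is_prime(29)   # 29 = 6 * 5 - 1
#   True
#   >>> is_prime(25)   # 25 = 6 * 4 + 1, but divisible by the candidate i = 5
#   False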
class Test(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))
    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0), 'Zero doesn\'t have any positive factors, primes must have exactly two.', )
        self.assertFalse(
            is_prime(1), 'One only has 1 positive factor, primes must have exactly two.', )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))
if __name__ == "__main__":
unittest.main()
| 339 | 0 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
| 44 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
_DESCRIPTION = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
_KWARGS_DESCRIPTION = "\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n    predictions (list of str): list of translations to score.\n        Each translation should be tokenized into a list of tokens.\n    references (list of list of str): list of lists of references for each translation.\n        Each reference should be tokenized into a list of tokens.\n    min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n    max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n    'google_bleu': google_bleu score\n\nExamples:\n    Example 1:\n        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n        ...    'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n        ...    'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n        ...    'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n        ...    'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n        ...    'interested', 'in', 'world', 'history']\n        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n        ...    'because', 'he', 'read', 'the', 'book']\n\n        >>> list_of_references = [[ref1a], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric(\"google_bleu\")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n        >>> print(round(results[\"google_bleu\"], 2))\n        0.44\n\n    Example 2:\n        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n        ...    'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n        ...    'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n        ...    'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n        ...    'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n        ...    'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n        ...    'heed', 'the', 'cat', 'commands']\n        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n        ...    'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n        ...    'of', 'the', 'cat']\n\n        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n        ...    'interested', 'in', 'world', 'history']\n        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n        ...    'because', 'he', 'read', 'the', 'book']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric(\"google_bleu\")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n        >>> print(round(results[\"google_bleu\"], 2))\n        0.61\n\n    Example 3:\n        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n        ...    'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n        ...    'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n        ...    'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n        ...    'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n        ...    'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n        ...    'heed', 'the', 'cat', 'commands']\n        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n        ...    'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n        ...    'of', 'the', 'cat']\n\n        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n        ...    'interested', 'in', 'world', 'history']\n        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n        ...    'because', 'he', 'read', 'the', 'book']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric(\"google_bleu\")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n        >>> print(round(results[\"google_bleu\"], 2))\n        0.53\n\n    Example 4:\n        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n        ...    'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n        ...    'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n        ...    'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n        ...    'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n        ...    'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n        ...    'heed', 'the', 'cat', 'commands']\n        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n        ...    'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n        ...    'of', 'the', 'cat']\n\n        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n        ...    'interested', 'in', 'world', 'history']\n        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n        ...    'because', 'he', 'read', 'the', 'book']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric(\"google_bleu\")\n        >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n        >>> print(round(results[\"google_bleu\"], 2))\n        0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    'predictions': datasets.Sequence(datasets.Value('string', id='token'), id='sequence'),
                    'references': datasets.Sequence(
                        datasets.Sequence(datasets.Value('string', id='token'), id='sequence'), id='references'),
                }), )
    def _compute(self, predictions: List[List[str]], references: List[List[List[str]]], min_len: int = 1, max_len: int = 4, ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len)
        }
| 339 | 0 |
"""simple docstring"""
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def get_resize_output_image_size(input_image: np.ndarray, output_size: Union[int, Iterable[int]], keep_aspect_ratio: bool, multiple: int, ) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x
    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size
    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)
    return (new_height, new_width)
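# Illustrative example (not part of the original module): resizing a 480x640
# image towards 384 with `multiple=32` keeps each side a multiple of 32; with
# `keep_aspect_ratio=True` the factor closer to 1 (here the height factor 0.8)
# is applied to both sides:
#
#   get_resize_output_image_size(img, 384, keep_aspect_ratio=False, multiple=32)  # (384, 384)
#   get_resize_output_image_size(img, 384, keep_aspect_ratio=True, multiple=32)   # (384, 512)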
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Dict = ['pixel_values']
def __init__( self , _a = True , _a = None , _a = PILImageResampling.BILINEAR , _a = False , _a = 1 , _a = True , _a = 1 / 255 , _a = True , _a = None , _a = None , **_a , ):
super().__init__(**_a )
__a = size if size is not None else {'''height''': 384, '''width''': 384}
__a = get_size_dict(_a )
__a = do_resize
__a = size
__a = keep_aspect_ratio
__a = ensure_multiple_of
__a = resample
__a = do_rescale
__a = rescale_factor
__a = do_normalize
__a = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__a = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image , size , keep_aspect_ratio = False , ensure_multiple_of = 1 , resample = PILImageResampling.BICUBIC , data_format = None , **kwargs , ):
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
        output_size = get_resize_output_image_size(
            image , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=keep_aspect_ratio , multiple=ensure_multiple_of , )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def rescale( self , image , scale , data_format = None , **kwargs , ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image , mean , std , data_format = None , **kwargs , ):
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images , do_resize = None , size = None , keep_aspect_ratio = None , ensure_multiple_of = None , resample = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
    def post_process_semantic_segmentation( self , outputs , target_sizes = None ):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits ) != len(target_sizes ):
                raise ValueError(
                    '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
            if is_torch_tensor(target_sizes ):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits ) ):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(semantic_map )
        else:
            semantic_segmentation = logits.argmax(dim=1 )
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
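# Shape contract of post_process_semantic_segmentation (a sketch; the sizes
# are illustrative assumptions): logits of shape (batch, num_labels, h, w)
# plus target_sizes=[(480, 640)] yield one (480, 640) map of class ids per
# image, via bilinear upsampling followed by an argmax over the label axis.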
| 45 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
MTaTokenizerFast = TaTokenizerFast
UpperCAmelCase__ = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mt5"] = [
"MT5EncoderModel",
"MT5ForConditionalGeneration",
"MT5ForQuestionAnswering",
"MT5Model",
"MT5PreTrainedModel",
"MT5Stack",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
    sys.modules[__name__] = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast},
module_spec=__spec__,
)
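# Rough sketch of what the lazy pattern above buys (illustrative only):
# replacing the module object in sys.modules defers the heavy torch/tf/flax
# imports until an attribute is first accessed.
#
#   import transformers.models.mt5 as mt5   # cheap: no torch import yet
#   model_cls = mt5.MT5Model                # _LazyModule.__getattr__ imports modeling_mt5 now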
| 339 | 0 |
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
SCREAMING_SNAKE_CASE__ = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
SCREAMING_SNAKE_CASE__ = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
SCREAMING_SNAKE_CASE__ = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... 
[\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
    def _info( self ):
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""http://www.cs.umd.edu/~snover/tercom/""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#ter"""] , reference_urls=[
"""https://github.com/jhclark/tercom""",
] , )
    def _compute( self , predictions , references , normalized = False , ignore_punct = False , support_zh_ja_chars = False , case_sensitive = False , ):
        references_per_prediction = len(references[0] )
        if any(len(refs ) != references_per_prediction for refs in references ):
            raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
        sb_ter = TER(
            normalized=normalized , no_punct=ignore_punct , asian_support=support_zh_ja_chars , case_sensitive=case_sensitive , )
        output = sb_ter.corpus_score(predictions , transformed_references )
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 46 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class __lowerCAmelCase ( PretrainedConfig ):
    model_type = '''open-llama'''
    def __init__( self , vocab_size=10_00_00 , hidden_size=40_96 , intermediate_size=1_10_08 , num_hidden_layers=32 , num_attention_heads=32 , hidden_act="silu" , max_position_embeddings=20_48 , initializer_range=0.0_2 , rms_norm_eps=1E-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , tie_word_embeddings=False , use_memory_efficient_attention=True , hidden_dropout_prob=0.1 , attention_dropout_prob=0.1 , use_stable_embedding=True , shared_input_output_embedding=True , rope_scaling=None , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # the misspelled kwarg key is kept as-is for backward compatibility
        self.use_memory_efficient_attention = kwargs.pop(
            'use_memorry_efficient_attention' , use_memory_efficient_attention)
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )
    def _rope_scaling_validation( self ):
        """simple docstring"""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                F"got {self.rope_scaling}")
        rope_scaling_type = self.rope_scaling.get('type' , None)
        rope_scaling_factor = self.rope_scaling.get('factor' , None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float) or rope_scaling_factor <= 1.0:
            raise ValueError(F"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
| 339 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class StableDiffusionPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.26.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version(">=", "0.0.12")
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
    class FlaxStableDiffusionPipelineOutput(BaseOutput):
        images: np.ndarray
        nsfw_content_detected: List[bool]
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 47 |
def validate_initial_digits(credit_card_number: str ) -> bool:
    '''simple docstring'''
    return credit_card_number.startswith(('34', '35', '37', '4', '5', '6') )
def luhn_validation(credit_card_number: str ) -> bool:
    '''simple docstring'''
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number ) - 2
    for i in range(half_len , -1 , -2 ):
        # double the value of every second digit
        digit = int(cc_number[i] )
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit ) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number ) - 1 , -1 , -2 ):
        total += int(cc_number[i] )
    return total % 10 == 0
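# Worked example on "4111111111111111": the doubled digits (every second from
# the right) are 8,2,2,2,2,2,2,2, summing to 22; the remaining eight 1s add 8;
# 22 + 8 = 30 is divisible by 10, so the number passes the Luhn check.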
def validate_credit_card_number(credit_card_number: str ) -> bool:
    '''simple docstring'''
    error_message = F"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(F"{error_message} it has nonnumerical characters." )
        return False
    if not 13 <= len(credit_card_number ) <= 16:
        print(F"{error_message} of its length." )
        return False
    if not validate_initial_digits(credit_card_number ):
        print(F"{error_message} of its first two digits." )
        return False
    if not luhn_validation(credit_card_number ):
        print(F"{error_message} it fails the Luhn check." )
        return False
    print(F"{credit_card_number} is a valid credit card number." )
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("4111111111111111")
validate_credit_card_number("32323")
| 339 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class IBertConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = """ibert"""
    def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , quant_mode=False , force_dequant="none" , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class IBertOnnxConfig(OnnxConfig ):
    '''simple docstring'''
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
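# What the property above produces for the default task (a sketch):
#   OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
#                ("attention_mask", {0: "batch", 1: "sequence"})])
# i.e. both input axes are declared dynamic for ONNX export.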
| 48 |
from functools import reduce
UpperCAmelCase__ = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def solution(n: str = N ) -> int:
    '''simple docstring'''
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x , y : str(int(x ) * int(y ) ) , n[i : i + 13] ) )
        for i in range(len(n ) - 12 ) )
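# Sanity check from the original Project Euler problem 8 statement: with a
# window of 4 adjacent digits instead of 13, the greatest product in this
# series is 9 * 9 * 8 * 9 = 5832.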
if __name__ == "__main__":
print(f"""{solution() = }""")
| 339 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 49 |
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve(matrix: Matrix , vector: Matrix ) -> Matrix:
    '''simple docstring'''
    size = len(matrix )
    augmented: Matrix = [[0 for _ in range(size + 1 )] for _ in range(size )]
    row: int
    rowa: int
    col: int
    cola: int
    pivot_row: int
    ratio: float
    # build the augmented matrix [matrix | vector]
    for row in range(size ):
        for col in range(size ):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[rowa][col] ), rowa) for rowa in range(row , size ) )[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        for rowa in range(row + 1 , size ):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1 , size + 1 ):
                augmented[rowa][cola] -= augmented[row][cola] * ratio
        row += 1
        col += 1
    # back substitution
    for col in range(1 , size ):
        for row in range(col ):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col , size + 1 ):
                augmented[row][cola] -= augmented[col][cola] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(size )
    ]
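# Minimal usage sketch: solving 2x + y = 5 and x + 3y = 10.
#
#   solve([[2.0, 1.0], [1.0, 3.0]], [[5.0], [10.0]])  # -> [[1.0], [3.0]]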
def interpolate(y_list: list[int] ) -> Callable[[int], int]:
    '''simple docstring'''
    size = len(y_list )
    matrix: Matrix = [[0 for _ in range(size )] for _ in range(size )]
    vector: Matrix = [[0] for _ in range(size )]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int
    for x_val, y_val in enumerate(y_list ):
        for col in range(size ):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(matrix , vector )
    def interpolated_func(var: int ) -> int:
        return sum(
            round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
            for x_val in range(size ) )
    return interpolated_func
def question_function(variable: int ) -> int:
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def solution(func: Callable[[int], int] = question_function , order: int = 10 ) -> int:
    '''simple docstring'''
    data_points = [func(x_val ) for x_val in range(1 , order + 1 )]
    polynomials = [
        interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
    ]
    ret = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val ) == poly(x_val ):
            x_val += 1
        ret += poly(x_val )
    return ret
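# With the default degree-10 question_function this reproduces Project Euler
# problem 101: the sum of the first incorrect terms of the optimum
# polynomials OP(1) .. OP(10) is 37076114526.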
if __name__ == "__main__":
print(f"""{solution() = }""")
| 339 | 0 |
from ..utils import DummyObject, requires_backends
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : List[Any] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : List[str] ) -> int:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Optional[Any] , *UpperCAmelCase : int , **UpperCAmelCase : List[str] ) -> str:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Optional[int] , *UpperCAmelCase : Dict , **UpperCAmelCase : Optional[Any] ) -> Optional[int]:
requires_backends(cls , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : Tuple , *UpperCAmelCase : int , **UpperCAmelCase : Union[str, Any] ) -> Any:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : List[str] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : Union[str, Any] ) -> Dict:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Optional[int] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : Dict ) -> List[str]:
requires_backends(cls , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : List[Any] , *UpperCAmelCase : str , **UpperCAmelCase : Optional[Any] ) -> Any:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Union[str, Any] , *UpperCAmelCase : str , **UpperCAmelCase : List[Any] ) -> Tuple:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Union[str, Any] , *UpperCAmelCase : List[str] , **UpperCAmelCase : Optional[int] ) -> List[Any]:
requires_backends(cls , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : Tuple , *UpperCAmelCase : List[str] , **UpperCAmelCase : List[str] ) -> Tuple:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Optional[int] , *UpperCAmelCase : List[str] , **UpperCAmelCase : int ) -> Dict:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Optional[Any] , *UpperCAmelCase : List[str] , **UpperCAmelCase : Dict ) -> List[str]:
requires_backends(cls , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : Dict , *UpperCAmelCase : List[str] , **UpperCAmelCase : int ) -> Dict:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : List[str] , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : str ) -> Any:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Tuple , *UpperCAmelCase : str , **UpperCAmelCase : str ) -> List[str]:
requires_backends(cls , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : Any , *UpperCAmelCase : int , **UpperCAmelCase : Optional[Any] ) -> Any:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Tuple , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : Tuple ) -> Any:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Optional[Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : List[str] ) -> List[Any]:
requires_backends(cls , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : int , *UpperCAmelCase : List[Any] , **UpperCAmelCase : List[Any] ) -> List[Any]:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Optional[int] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : str ) -> Union[str, Any]:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : int , *UpperCAmelCase : Dict , **UpperCAmelCase : Union[str, Any] ) -> Any:
requires_backends(cls , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : Any , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : str , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : Optional[Any] ) -> Optional[Any]:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : List[Any] , *UpperCAmelCase : Dict , **UpperCAmelCase : Union[str, Any] ) -> int:
requires_backends(cls , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : Optional[int] , *UpperCAmelCase : Tuple , **UpperCAmelCase : Dict ) -> Any:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Union[str, Any] , *UpperCAmelCase : List[str] , **UpperCAmelCase : Dict ) -> Optional[int]:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Optional[int] , *UpperCAmelCase : int , **UpperCAmelCase : List[str] ) -> str:
requires_backends(cls , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : Union[str, Any] , *UpperCAmelCase : Any , **UpperCAmelCase : Optional[Any] ) -> Dict:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : int , *UpperCAmelCase : Tuple , **UpperCAmelCase : Tuple ) -> str:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Optional[Any] , *UpperCAmelCase : int , **UpperCAmelCase : Dict ) -> Any:
requires_backends(cls , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : Any , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Any ) -> Optional[int]:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Any , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Tuple ) -> List[str]:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Optional[int] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Optional[Any] ) -> List[str]:
requires_backends(cls , ['torch'] )
def SCREAMING_SNAKE_CASE ( *_UpperCAmelCase , **_UpperCAmelCase ) -> Any:
requires_backends(_UpperCAmelCase , ['torch'] )
def SCREAMING_SNAKE_CASE ( *_UpperCAmelCase , **_UpperCAmelCase ) -> str:
requires_backends(_UpperCAmelCase , ['torch'] )
def SCREAMING_SNAKE_CASE ( *_UpperCAmelCase , **_UpperCAmelCase ) -> Optional[int]:
requires_backends(_UpperCAmelCase , ['torch'] )
def SCREAMING_SNAKE_CASE ( *_UpperCAmelCase , **_UpperCAmelCase ) -> Any:
requires_backends(_UpperCAmelCase , ['torch'] )
def SCREAMING_SNAKE_CASE ( *_UpperCAmelCase , **_UpperCAmelCase ) -> Tuple:
requires_backends(_UpperCAmelCase , ['torch'] )
def SCREAMING_SNAKE_CASE ( *_UpperCAmelCase , **_UpperCAmelCase ) -> Union[str, Any]:
requires_backends(_UpperCAmelCase , ['torch'] )
def SCREAMING_SNAKE_CASE ( *_UpperCAmelCase , **_UpperCAmelCase ) -> Any:
requires_backends(_UpperCAmelCase , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : Tuple , *UpperCAmelCase : Tuple , **UpperCAmelCase : Dict ) -> List[str]:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Any , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[Any] ) -> Dict:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Optional[Any] , *UpperCAmelCase : Tuple , **UpperCAmelCase : List[str] ) -> Dict:
requires_backends(cls , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : List[Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : List[str] ) -> Union[str, Any]:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Optional[Any] , *UpperCAmelCase : List[str] , **UpperCAmelCase : Optional[int] ) -> int:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : List[str] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : List[str] ) -> Tuple:
requires_backends(cls , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : List[Any] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : Optional[int] ) -> Tuple:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : List[str] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Tuple ) -> Union[str, Any]:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : List[str] , *UpperCAmelCase : Dict , **UpperCAmelCase : Union[str, Any] ) -> Dict:
requires_backends(cls , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : List[str] , *UpperCAmelCase : int , **UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Any , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Any ) -> Tuple:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Any , *UpperCAmelCase : Any , **UpperCAmelCase : Optional[Any] ) -> int:
requires_backends(cls , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : int , *UpperCAmelCase : str , **UpperCAmelCase : List[str] ) -> Optional[int]:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Dict , *UpperCAmelCase : Dict , **UpperCAmelCase : Union[str, Any] ) -> Tuple:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Dict , *UpperCAmelCase : int , **UpperCAmelCase : Any ) -> Any:
requires_backends(cls , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : Tuple , *UpperCAmelCase : Any , **UpperCAmelCase : Any ) -> Optional[int]:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Optional[int] , *UpperCAmelCase : Dict , **UpperCAmelCase : List[str] ) -> Union[str, Any]:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Optional[Any] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : Any ) -> Tuple:
requires_backends(cls , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : List[Any] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : Any ) -> Tuple:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Any , *UpperCAmelCase : Any , **UpperCAmelCase : str ) -> Any:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : List[str] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : Any ) -> Optional[int]:
requires_backends(cls , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : Dict , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Optional[Any] , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : str ) -> str:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Union[str, Any] , *UpperCAmelCase : Dict , **UpperCAmelCase : int ) -> int:
requires_backends(cls , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : int , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : List[str] ) -> int:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Dict , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : Optional[int] ) -> str:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : str , *UpperCAmelCase : List[str] , **UpperCAmelCase : Dict ) -> Dict:
requires_backends(cls , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : Union[str, Any] , *UpperCAmelCase : List[str] , **UpperCAmelCase : str ) -> Optional[Any]:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : int , *UpperCAmelCase : Any , **UpperCAmelCase : List[str] ) -> List[Any]:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : List[Any] , *UpperCAmelCase : Dict , **UpperCAmelCase : Optional[int] ) -> Dict:
requires_backends(cls , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : List[Any] , *UpperCAmelCase : str , **UpperCAmelCase : Optional[int] ) -> List[Any]:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : str , *UpperCAmelCase : int , **UpperCAmelCase : int ) -> Optional[int]:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Any , *UpperCAmelCase : Tuple , **UpperCAmelCase : Dict ) -> Dict:
requires_backends(cls , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : Tuple , *UpperCAmelCase : int , **UpperCAmelCase : str ) -> Optional[Any]:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Dict , *UpperCAmelCase : str , **UpperCAmelCase : List[Any] ) -> Optional[Any]:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Union[str, Any] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : List[str] ) -> List[Any]:
requires_backends(cls , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : Union[str, Any] , *UpperCAmelCase : Any , **UpperCAmelCase : str ) -> Any:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Optional[Any] , *UpperCAmelCase : str , **UpperCAmelCase : List[Any] ) -> Optional[Any]:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : List[Any] , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : List[Any] ) -> Optional[int]:
requires_backends(cls , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : int , *UpperCAmelCase : List[str] , **UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : int , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : str ) -> Any:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Optional[Any] , *UpperCAmelCase : Tuple , **UpperCAmelCase : Dict ) -> Any:
requires_backends(cls , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : List[str] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : List[str] ) -> Optional[int]:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : List[str] , *UpperCAmelCase : Dict , **UpperCAmelCase : int ) -> Union[str, Any]:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Union[str, Any] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : str ) -> str:
requires_backends(cls , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : Any , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : Optional[Any] ) -> Optional[int]:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Any , *UpperCAmelCase : Tuple , **UpperCAmelCase : Union[str, Any] ) -> int:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : List[str] , *UpperCAmelCase : List[str] , **UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : List[str] , *UpperCAmelCase : Dict , **UpperCAmelCase : Optional[Any] ) -> str:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : List[str] , *UpperCAmelCase : Any , **UpperCAmelCase : str ) -> str:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Optional[Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Optional[Any] ) -> str:
requires_backends(cls , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : Dict , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Tuple ) -> int:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : List[Any] , *UpperCAmelCase : Any , **UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : List[Any] , *UpperCAmelCase : Tuple , **UpperCAmelCase : List[str] ) -> Dict:
requires_backends(cls , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : Optional[int] , *UpperCAmelCase : Tuple , **UpperCAmelCase : Optional[int] ) -> Dict:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Optional[int] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Optional[int] ) -> Optional[Any]:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Union[str, Any] , *UpperCAmelCase : Tuple , **UpperCAmelCase : Dict ) -> Dict:
requires_backends(cls , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : str , *UpperCAmelCase : int , **UpperCAmelCase : str ) -> Dict:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Optional[int] , *UpperCAmelCase : str , **UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Dict , *UpperCAmelCase : Tuple , **UpperCAmelCase : str ) -> Any:
requires_backends(cls , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : List[Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Any ) -> Tuple:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Dict , *UpperCAmelCase : Any , **UpperCAmelCase : Dict ) -> str:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Dict , *UpperCAmelCase : Tuple , **UpperCAmelCase : List[str] ) -> str:
requires_backends(cls , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : List[Any] , *UpperCAmelCase : List[str] , **UpperCAmelCase : str ) -> Any:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Optional[Any] , *UpperCAmelCase : int , **UpperCAmelCase : str ) -> Dict:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Optional[int] , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
requires_backends(cls , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : Tuple , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : Optional[int] ) -> Tuple:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : int , *UpperCAmelCase : int , **UpperCAmelCase : List[str] ) -> List[Any]:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Optional[int] , *UpperCAmelCase : Tuple , **UpperCAmelCase : Union[str, Any] ) -> Any:
requires_backends(cls , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : Union[str, Any] , *UpperCAmelCase : List[str] , **UpperCAmelCase : Dict ) -> int:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Optional[int] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : List[Any] ) -> Optional[int]:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Optional[Any] , *UpperCAmelCase : str , **UpperCAmelCase : Union[str, Any] ) -> Tuple:
requires_backends(cls , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : Dict , *UpperCAmelCase : Dict , **UpperCAmelCase : Tuple ) -> Tuple:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Any , *UpperCAmelCase : List[str] , **UpperCAmelCase : Tuple ) -> Union[str, Any]:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Tuple , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Tuple ) -> Union[str, Any]:
requires_backends(cls , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : Union[str, Any] , *UpperCAmelCase : Dict , **UpperCAmelCase : Optional[Any] ) -> List[str]:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Optional[int] , *UpperCAmelCase : str , **UpperCAmelCase : Optional[Any] ) -> Optional[int]:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Union[str, Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : int ) -> int:
requires_backends(cls , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : str , *UpperCAmelCase : Tuple , **UpperCAmelCase : str ) -> Dict:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : int , *UpperCAmelCase : int , **UpperCAmelCase : Tuple ) -> Optional[Any]:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Tuple , *UpperCAmelCase : int , **UpperCAmelCase : List[Any] ) -> Dict:
requires_backends(cls , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : Any , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : int ) -> int:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Optional[int] , *UpperCAmelCase : Any , **UpperCAmelCase : Optional[Any] ) -> List[str]:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Union[str, Any] , *UpperCAmelCase : Tuple , **UpperCAmelCase : Dict ) -> List[Any]:
requires_backends(cls , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : str , *UpperCAmelCase : int , **UpperCAmelCase : Any ) -> str:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Optional[Any] , *UpperCAmelCase : Dict , **UpperCAmelCase : Any ) -> Optional[Any]:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : List[Any] , *UpperCAmelCase : List[str] , **UpperCAmelCase : List[str] ) -> Optional[int]:
requires_backends(cls , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : List[Any] , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Tuple ) -> Any:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Any , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : Any ) -> str:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : List[str] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Optional[int] ) -> Any:
requires_backends(cls , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : Any , *UpperCAmelCase : str , **UpperCAmelCase : int ) -> List[Any]:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : List[Any] , *UpperCAmelCase : List[str] , **UpperCAmelCase : Any ) -> str:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : List[str] , *UpperCAmelCase : int , **UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
requires_backends(cls , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : Tuple , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Dict ) -> List[Any]:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : List[Any] , *UpperCAmelCase : Tuple , **UpperCAmelCase : Dict ) -> Optional[Any]:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Tuple , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Tuple ) -> Tuple:
requires_backends(cls , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : Union[str, Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Union[str, Any] ) -> Dict:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Optional[Any] , *UpperCAmelCase : Dict , **UpperCAmelCase : Tuple ) -> Union[str, Any]:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Optional[int] , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Any ) -> Dict:
requires_backends(cls , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : Union[str, Any] , *UpperCAmelCase : Any , **UpperCAmelCase : Dict ) -> Optional[int]:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : int , *UpperCAmelCase : str , **UpperCAmelCase : int ) -> Any:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Any , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : Dict ) -> List[Any]:
requires_backends(cls , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : Optional[int] , *UpperCAmelCase : Tuple , **UpperCAmelCase : Tuple ) -> Union[str, Any]:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Optional[Any] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : List[str] ) -> str:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : List[str] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : Union[str, Any] ) -> Tuple:
requires_backends(cls , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : Dict , *UpperCAmelCase : str , **UpperCAmelCase : List[Any] ) -> List[Any]:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Tuple , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Union[str, Any] , *UpperCAmelCase : Dict , **UpperCAmelCase : int ) -> int:
requires_backends(cls , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : List[str] , *UpperCAmelCase : Dict , **UpperCAmelCase : Dict ) -> List[Any]:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Optional[Any] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : Optional[Any] ) -> Optional[Any]:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Dict , *UpperCAmelCase : int , **UpperCAmelCase : Dict ) -> Dict:
requires_backends(cls , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : Any , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : int ) -> Optional[int]:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Optional[int] , *UpperCAmelCase : List[str] , **UpperCAmelCase : Tuple ) -> Any:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Tuple , *UpperCAmelCase : str , **UpperCAmelCase : Dict ) -> Any:
requires_backends(cls , ['torch'] )
class lowerCAmelCase ( metaclass=__UpperCamelCase ):
UpperCAmelCase__ = ["""torch"""]
def __init__( self : Union[str, Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : List[str] ) -> Optional[int]:
requires_backends(self , ['torch'] )
@classmethod
def A_ ( cls : Union[str, Any] , *UpperCAmelCase : str , **UpperCAmelCase : Optional[int] ) -> str:
requires_backends(cls , ['torch'] )
@classmethod
def A_ ( cls : Any , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : Optional[int] ) -> Optional[Any]:
requires_backends(cls , ['torch'] )
| 50 |
from __future__ import annotations
def A ( _UpperCAmelCase : list[int] ) -> bool:
'''simple docstring'''
return len(set(_UpperCAmelCase ) ) == len(_UpperCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
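    # Added example (the sample lists are my own, not from the source): duplicates
    # shrink the set, so the length comparison detects them.
    assert A([1, 2, 3]) and not A([1, 2, 2])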
| 339 | 0 |
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256 as shaaaa  # hashlib has no "shaaaa"; the hashing below expects SHA-256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2 as cva  # OpenCV; aliased to the name used throughout this file
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch
    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home
    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )
default_cache_path = os.path.join(torch_cache_home, "transformers")
CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    """simple docstring"""
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())
    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs
def A (__A : int ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ = OrderedDict()
with open(__A , '''rb''' ) as f:
UpperCAmelCase_ = pkl.load(__A )['''model''']
for k in copy.deepcopy(list(ckp.keys() ) ):
UpperCAmelCase_ = ckp.pop(__A )
if isinstance(__A , np.ndarray ):
UpperCAmelCase_ = torch.tensor(__A )
else:
            assert isinstance(__A , torch.Tensor ), type(__A )  # torch.tensor is a factory function; isinstance needs the torch.Tensor class
UpperCAmelCase_ = v
return r
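# Added illustration (not from the original file; the fake state dict is an
# assumption, and running it requires torch): mirrors the numpy -> torch
# conversion performed by the checkpoint loader above.
def _demo_ckp_conversion():
    fake = {"w": np.zeros((2, 2)), "b": np.ones(3)}
    converted = {k: torch.tensor(v) if isinstance(v, np.ndarray) else v for k, v in fake.items()}
    assert all(isinstance(v, torch.Tensor) for v in converted.values())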
class __snake_case :
UpperCAmelCase__ : str = {}
def __init__( self : Tuple , _snake_case : dict , _snake_case : str = "root" , _snake_case : Optional[Any]=0):
"""simple docstring"""
UpperCAmelCase_ = name
UpperCAmelCase_ = level
UpperCAmelCase_ = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
UpperCAmelCase_ = copy.deepcopy(_snake_case)
UpperCAmelCase_ = copy.deepcopy(_snake_case)
if isinstance(_snake_case , _snake_case):
UpperCAmelCase_ = Config(_snake_case , name=_snake_case , level=level + 1)
UpperCAmelCase_ = v
setattr(self , _snake_case , _snake_case)
UpperCAmelCase_ = d
def __repr__( self : Any):
"""simple docstring"""
return str(list((self._pointer.keys())))
def __setattr__( self : int , _snake_case : List[Any] , _snake_case : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = val
UpperCAmelCase_ = val
UpperCAmelCase_ = key.split('''.''')
UpperCAmelCase_ = len(_snake_case) - 1
UpperCAmelCase_ = self._pointer
if len(_snake_case) > 1:
for i, l in enumerate(_snake_case):
if hasattr(self , _snake_case) and isinstance(getattr(self , _snake_case) , _snake_case):
setattr(getattr(self , _snake_case) , '''.'''.join(levels[i:]) , _snake_case)
if l == last_level:
UpperCAmelCase_ = val
else:
UpperCAmelCase_ = pointer[l]
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
return self._pointer
def lowerCamelCase ( self : Union[str, Any] , _snake_case : List[str] , _snake_case : Optional[int]):
"""simple docstring"""
with open(F"""{file_name}""" , '''w''') as stream:
dump(_snake_case , _snake_case)
def lowerCamelCase ( self : int , _snake_case : List[str] , _snake_case : Any):
"""simple docstring"""
with open(F"""{file_name}""" , '''w''') as stream:
json.dump(_snake_case , _snake_case)
@staticmethod
def lowerCamelCase ( _snake_case : str):
"""simple docstring"""
with open(_snake_case) as stream:
UpperCAmelCase_ = load(_snake_case , Loader=_snake_case)
return data
def __str__( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = ''' '''
if self._name != "root":
UpperCAmelCase_ = F"""{t * (self._level-1)}{self._name}:\n"""
else:
UpperCAmelCase_ = ''''''
UpperCAmelCase_ = self._level
for i, (k, v) in enumerate(self._pointer.items()):
if isinstance(_snake_case , _snake_case):
r += F"""{t * (self._level)}{v}\n"""
self._level += 1
else:
r += F"""{t * (self._level)}{k}: {v} ({type(_snake_case).__name__})\n"""
UpperCAmelCase_ = level
return r[:-1]
@classmethod
def lowerCamelCase ( cls : Any , _snake_case : str , **_snake_case : Any):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = cls.get_config_dict(_snake_case , **_snake_case)
return cls(_snake_case)
@classmethod
def lowerCamelCase ( cls : Union[str, Any] , _snake_case : str , **_snake_case : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = kwargs.pop('''cache_dir''' , _snake_case)
UpperCAmelCase_ = kwargs.pop('''force_download''' , _snake_case)
UpperCAmelCase_ = kwargs.pop('''resume_download''' , _snake_case)
UpperCAmelCase_ = kwargs.pop('''proxies''' , _snake_case)
UpperCAmelCase_ = kwargs.pop('''local_files_only''' , _snake_case)
if os.path.isdir(_snake_case):
UpperCAmelCase_ = os.path.join(_snake_case , _snake_case)
elif os.path.isfile(_snake_case) or is_remote_url(_snake_case):
UpperCAmelCase_ = pretrained_model_name_or_path
else:
UpperCAmelCase_ = hf_bucket_url(_snake_case , filename=_snake_case , use_cdn=_snake_case)
try:
# Load from URL or cache if already cached
UpperCAmelCase_ = cached_path(
_snake_case , cache_dir=_snake_case , force_download=_snake_case , proxies=_snake_case , resume_download=_snake_case , local_files_only=_snake_case , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
UpperCAmelCase_ = Config.load_yaml(_snake_case)
except EnvironmentError:
UpperCAmelCase_ = '''Can\'t load config for'''
raise EnvironmentError(_snake_case)
if resolved_config_file == config_file:
print('''loading configuration file from path''')
else:
print('''loading configuration file cache''')
return Config.load_yaml(_snake_case), kwargs
def A (__A : List[str] ) -> int:
"""simple docstring"""
UpperCAmelCase_ = torch.load('''dump.pt''' , map_location=in_tensor.device )
UpperCAmelCase_ = in_tensor.numpy()
UpperCAmelCase_ = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(__A , __A , rtol=0.01 , atol=0.1 ), (
F"""{sum([1 for x in np.isclose(__A , __A , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %"""
" element-wise mismatch"
)
raise Exception('''tensors are all good''' )
# Hugging face functions below
def is_remote_url(url_or_filename) -> bool:
    """simple docstring"""
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")
def hf_bucket_url(model_id: str, filename: str, use_cdn=True) -> str:
    """simple docstring"""
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return F"""{endpoint}/{model_id}-{filename}"""
    else:
        return F"""{endpoint}/{model_id}/{filename}"""
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
"""simple docstring"""
UpperCAmelCase_ = '''python/{}'''.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(__A , __A ):
ua += "; " + "; ".join('''{}/{}'''.format(__A , __A ) for k, v in user_agent.items() )
elif isinstance(__A , __A ):
ua += "; " + user_agent
UpperCAmelCase_ = {'''user-agent''': ua}
if resume_size > 0:
UpperCAmelCase_ = '''bytes=%d-''' % (resume_size,)
UpperCAmelCase_ = requests.get(__A , stream=__A , proxies=__A , headers=__A )
if response.status_code == 416: # Range not satisfiable
return
UpperCAmelCase_ = response.headers.get('''Content-Length''' )
UpperCAmelCase_ = resume_size + int(__A ) if content_length is not None else None
UpperCAmelCase_ = tqdm(
unit='''B''' , unit_scale=__A , total=__A , initial=__A , desc='''Downloading''' , )
for chunk in response.iter_content(chunk_size=1024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(__A ) )
temp_file.write(__A )
progress.close()
def get_from_cache(url, cache_dir=None, force_download=False, proxies=None, etag_timeout=10, resume_download=False, user_agent=None, local_files_only=False, ):
"""simple docstring"""
if cache_dir is None:
UpperCAmelCase_ = TRANSFORMERS_CACHE
if isinstance(__A , __A ):
UpperCAmelCase_ = str(__A )
os.makedirs(__A , exist_ok=__A )
UpperCAmelCase_ = None
if not local_files_only:
try:
UpperCAmelCase_ = requests.head(__A , allow_redirects=__A , proxies=__A , timeout=__A )
if response.status_code == 200:
UpperCAmelCase_ = response.headers.get('''ETag''' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
UpperCAmelCase_ = url_to_filename(__A , __A )
# get cache path to put the file
UpperCAmelCase_ = os.path.join(__A , __A )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(__A ):
return cache_path
else:
UpperCAmelCase_ = [
file
for file in fnmatch.filter(os.listdir(__A ) , filename + '''.*''' )
if not file.endswith('''.json''' ) and not file.endswith('''.lock''' )
]
if len(__A ) > 0:
return os.path.join(__A , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'''Cannot find the requested files in the cached path and outgoing traffic has been'''
''' disabled. To enable model look-ups and downloads online, set \'local_files_only\''''
''' to False.''' )
return None
# From now on, etag is not None.
if os.path.exists(__A ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
UpperCAmelCase_ = cache_path + '''.lock'''
with FileLock(__A ):
# If the download just completed while the lock was activated.
if os.path.exists(__A ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
UpperCAmelCase_ = cache_path + '''.incomplete'''
@contextmanager
def _resumable_file_manager():
with open(__A , '''a+b''' ) as f:
yield f
UpperCAmelCase_ = _resumable_file_manager
if os.path.exists(__A ):
UpperCAmelCase_ = os.stat(__A ).st_size
else:
UpperCAmelCase_ = 0
else:
UpperCAmelCase_ = partial(tempfile.NamedTemporaryFile , dir=__A , delete=__A )
UpperCAmelCase_ = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
'''%s not found in cache or force_download set to True, downloading to %s''' , __A , temp_file.name , )
http_get(
__A , __A , proxies=__A , resume_size=__A , user_agent=__A , )
os.replace(temp_file.name , __A )
UpperCAmelCase_ = {'''url''': url, '''etag''': etag}
UpperCAmelCase_ = cache_path + '''.json'''
with open(__A , '''w''' ) as meta_file:
json.dump(__A , __A )
return cache_path
def url_to_filename(url, etag=None):
    """simple docstring"""
    url_bytes = url.encode("utf-8")
    url_hash = shaaaa(url_bytes)
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = shaaaa(etag_bytes)
        filename += "." + etag_hash.hexdigest()
    if url.endswith(".h5"):
        filename += ".h5"
    return filename
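# Added sketch of the cache-naming scheme above (the URL and etag are fabricated):
# the name is sha256(url), plus "." + sha256(etag) when an ETag is known, so
# distinct revisions of one URL cache under distinct entries.
def _demo_cache_name():
    name = url_to_filename("https://example.com/model.bin", etag='"abc123"')
    assert name.count(".") == 1 and len(name.split(".")[0]) == 64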
def cached_path(url_or_filename, cache_dir=None, force_download=False, proxies=None, resume_download=False, user_agent=None, extract_compressed_file=False, force_extract=False, local_files_only=False, ):
"""simple docstring"""
if cache_dir is None:
UpperCAmelCase_ = TRANSFORMERS_CACHE
if isinstance(__A , __A ):
UpperCAmelCase_ = str(__A )
if isinstance(__A , __A ):
UpperCAmelCase_ = str(__A )
if is_remote_url(__A ):
# URL, so get it from the cache (downloading if necessary)
UpperCAmelCase_ = get_from_cache(
__A , cache_dir=__A , force_download=__A , proxies=__A , resume_download=__A , user_agent=__A , local_files_only=__A , )
elif os.path.exists(__A ):
# File, and it exists.
UpperCAmelCase_ = url_or_filename
elif urlparse(__A ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('''file {} not found'''.format(__A ) )
else:
# Something unknown
raise ValueError('''unable to parse {} as a URL or as a local path'''.format(__A ) )
if extract_compressed_file:
if not is_zipfile(__A ) and not tarfile.is_tarfile(__A ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
UpperCAmelCase_ , UpperCAmelCase_ = os.path.split(__A )
UpperCAmelCase_ = output_file.replace('''.''' , '''-''' ) + '''-extracted'''
UpperCAmelCase_ = os.path.join(__A , __A )
if os.path.isdir(__A ) and os.listdir(__A ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
UpperCAmelCase_ = output_path + '''.lock'''
with FileLock(__A ):
shutil.rmtree(__A , ignore_errors=__A )
os.makedirs(__A )
if is_zipfile(__A ):
with ZipFile(__A , '''r''' ) as zip_file:
zip_file.extractall(__A )
zip_file.close()
elif tarfile.is_tarfile(__A ):
UpperCAmelCase_ = tarfile.open(__A )
tar_file.extractall(__A )
tar_file.close()
else:
raise EnvironmentError('''Archive format of {} could not be identified'''.format(__A ) )
return output_path_extracted
return output_path
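# Added sketch (the path is an assumption, purely for illustration): the same
# lock-then-recheck pattern used above, so concurrent processes write a given
# cache entry at most once.
def _demo_locked_write(path="/tmp/_demo_cache.bin"):
    with FileLock(path + ".lock"):
        if not os.path.exists(path):
            with open(path, "wb") as f:
                f.write(b"payload")
    return path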
def get_data(query, delim=","):
    """simple docstring"""
    assert isinstance(query, str)
    if os.path.isfile(query):
        with open(query) as f:
            data = eval(f.read())
    else:
        req = requests.get(query)
        try:
            data = req.json()  # bug fix: .json() lives on the response object, not the requests module
        except Exception:
            data = req.content.decode()
        assert data is not None, "could not connect"
        try:
            data = eval(data)
        except Exception:
            data = data.split("\n")
        req.close()
    return data
def get_image_from_url(url):
    """simple docstring"""
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img
def load_frcnn_pkl_from_url(url):
    """simple docstring"""
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            new_k = k.replace("running_var", "num_batches_tracked")
            new[new_k] = zero
    return new
def get_demo_path():
    """simple docstring"""
    print(F"""{os.path.abspath(os.path.join(PATH , os.pardir ) )}/demo.ipynb""")
def img_tensorize(im, input_format="RGB"):
    """simple docstring"""
    assert isinstance(im, str)
    if os.path.isfile(im):
        img = cva.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, F"""could not connect to: {im}"""
    img = cva.cvtColor(img, cva.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img
def chunk(images, batch=1):
    """simple docstring"""
    return (images[i : i + batch] for i in range(0, len(images), batch))
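# Added illustration (fabricated input): the generator above yields fixed-size
# slices, with a shorter final batch when the length is not an exact multiple.
def _demo_chunk():
    batches = [list(range(5))[i : i + 2] for i in range(0, 5, 2)]
    assert batches == [[0, 1], [2, 3], [4]]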
| 51 |
import os
UpperCAmelCase__ = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
def parse_roman_numerals(numerals: str) -> int:
    '''simple docstring'''
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
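# Added worked example (the value is my own): "XIV" scans X(10), I(1), V(5);
# since 1 < 5 the I is subtracted, giving 10 - 1 + 5 = 14.
assert parse_roman_numerals("XIV") == 14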
def generate_roman_numerals(num: int) -> str:
    '''simple docstring'''
    numerals = ""
    m_count = num // 1_000
    numerals += m_count * "M"
    num %= 1_000
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals
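# Added round-trip example (the value is my own): 1994 -> "MCMXCIV"
# (M + CM + XC + IV), the minimal-length form the solver is counting savings against.
assert generate_roman_numerals(1994) == "MCMXCIV"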
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    '''simple docstring'''
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        reduced = generate_roman_numerals(num)
        savings += len(original) - len(reduced)
    return savings
if __name__ == "__main__":
print(f"""{solution() = }""")
| 339 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}
SPIECE_UNDERLINE = "▁"
class CamembertTokenizer ( PreTrainedTokenizer ):
_UpperCAmelCase :List[str] = VOCAB_FILES_NAMES
_UpperCAmelCase :Optional[int] = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase :Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase :List[str] = ['input_ids', 'attention_mask']
def __init__( self , A_ , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , A_=["<s>NOTUSED", "</s>NOTUSED"] , A_ = None , **A_ , ):
'''simple docstring'''
UpperCamelCase : Dict = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
UpperCamelCase : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , additional_special_tokens=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , )
UpperCamelCase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A_ ) )
UpperCamelCase : str = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
UpperCamelCase : str = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
UpperCamelCase : Union[str, Any] = len(self.fairseq_tokens_to_ids )
UpperCamelCase : Any = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
UpperCamelCase : Dict = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __UpperCamelCase( self , A_ , A_ = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase : Any = [self.cls_token_id]
UpperCamelCase : str = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
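    # Added layout sketch (illustrative comment, not from the original file):
    # a single sequence becomes `<s> A </s>`; a pair becomes
    # `<s> A </s></s> B </s>`, since CamemBERT doubles the separator
    # between the two segments.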
def __UpperCamelCase( self , A_ , A_ = None , A_ = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
if token_ids_a is None:
return [1] + ([0] * len(A_ )) + [1]
return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1]
def __UpperCamelCase( self , A_ , A_ = None ):
'''simple docstring'''
UpperCamelCase : Dict = [self.sep_token_id]
UpperCamelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[int] = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
return self.sp_model.encode(A_ , out_type=A_ )
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(A_ ) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(A_ )
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
UpperCamelCase : Optional[int] = []
UpperCamelCase : Optional[int] = ""
UpperCamelCase : List[Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(A_ ) + token
UpperCamelCase : List[Any] = True
UpperCamelCase : Union[str, Any] = []
else:
current_sub_tokens.append(A_ )
UpperCamelCase : Optional[Any] = False
out_string += self.sp_model.decode(A_ )
return out_string.strip()
def __getstate__( self ):
'''simple docstring'''
UpperCamelCase : Any = self.__dict__.copy()
UpperCamelCase : Any = None
return state
def __setstate__( self , A_ ):
'''simple docstring'''
UpperCamelCase : Tuple = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCamelCase : Optional[Any] = {}
UpperCamelCase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCamelCase( self , A_ , A_ = None ):
'''simple docstring'''
if not os.path.isdir(A_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCamelCase : Optional[int] = os.path.join(
A_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A_ )
elif not os.path.isfile(self.vocab_file ):
with open(A_ , "wb" ) as fi:
UpperCamelCase : str = self.sp_model.serialized_model_proto()
fi.write(A_ )
return (out_vocab_file,)
| 52 |
import requests
from bs4 import BeautifulSoup  # the BeautifulSoup parser lives in the bs4 package
def get_citation(base_url: str, params: dict) -> str:
    '''simple docstring'''
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 339 | 0 |
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 53 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester ( unittest.TestCase ):
def __init__( self : Optional[Any] , A : Dict , A : Union[str, Any]=13 , A : Dict=7 , A : Dict=True , A : Tuple=True , A : Union[str, Any]=True , A : int=True , A : Optional[int]=99 , A : List[str]=32 , A : List[Any]=5 , A : int=4 , A : Any=37 , A : Optional[int]="gelu" , A : Optional[Any]=0.1 , A : Any=0.1 , A : Union[str, Any]=5_12 , A : int=16 , A : List[str]=2 , A : Union[str, Any]=0.0_2 , A : Union[str, Any]=4 , ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_attention_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_choices
def _lowerCamelCase ( self : Optional[Any]) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCAmelCase = None
if self.use_attention_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length])
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_UpperCAmelCase = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _lowerCamelCase ( self : List[Any]) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest ( FlaxModelTesterMixin , unittest.TestCase ):
UpperCamelCase = True
UpperCamelCase = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _lowerCamelCase ( self : Optional[int]) -> Any:
"""simple docstring"""
_UpperCAmelCase = FlaxRoFormerModelTester(self)
@slow
def _lowerCamelCase ( self : List[Any]) -> Dict:
"""simple docstring"""
for model_class_name in self.all_model_classes:
_UpperCAmelCase = model_class_name.from_pretrained('junnyu/roformer_chinese_small' , from_pt=A)
_UpperCAmelCase = model(np.ones((1, 1)))
self.assertIsNotNone(A)
@require_flax
class FlaxRoFormerModelIntegrationTest ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self : List[Any]) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base')
_UpperCAmelCase = jnp.array([[0, 1, 2, 3, 4, 5]])
_UpperCAmelCase = model(A)[0]
_UpperCAmelCase = 5_00_00
_UpperCAmelCase = (1, 6, vocab_size)
self.assertEqual(output.shape , A)
_UpperCAmelCase = jnp.array(
[[[-0.1_2_0_5, -1.0_2_6_5, 0.2_9_2_2], [-1.5_1_3_4, 0.1_9_7_4, 0.1_5_1_9], [-5.0_1_3_5, -3.9_0_0_3, -0.8_4_0_4]]])
self.assertTrue(jnp.allclose(output[:, :3, :3] , A , atol=1E-4))
| 339 | 0 |
"""simple docstring"""
from math import sqrt
def is_prime(number):
    '''simple docstring'''
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status
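# Added quick check (values are my own): trial division only needs to test
# divisors up to floor(sqrt(number)).
assert is_prime(97) and not is_prime(91)  # 91 = 7 * 13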
def sieve_er(n):
    '''simple docstring'''
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returns.
    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def get_prime_numbers(n):
    '''simple docstring'''
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
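# Added cross-check (the bound is my own): the sieve above and plain trial
# division must agree, e.g. up to 30: [2, 3, 5, 7, 11, 13, 17, 19, 23, 29].
assert sieve_er(30) == get_prime_numbers(30)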
def prime_factorization(number):
    '''simple docstring'''
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
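# Added worked example (the value is my own): 60 = 2 * 2 * 3 * 5.
assert prime_factorization(60) == [2, 2, 3, 5]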
def greatest_prime_factor(number):
    '''simple docstring'''
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def smallest_prime_factor(number):
    '''simple docstring'''
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def is_even(number):
    '''simple docstring'''
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare must been from type bool"
    return number % 2 == 0
def is_odd(number):
    '''simple docstring'''
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare must been from type bool"
    return number % 2 != 0
def goldbach(number):
    '''simple docstring'''
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"
    ans = []  # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans
def gcd(numbera, numberb):
    '''simple docstring'''
    assert (
        isinstance(numbera, int)
        and isinstance(numberb, int)
        and (numbera >= 0)
        and (numberb >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while numberb != 0:
        rest = numbera % numberb
        numbera = numberb
        numberb = rest
    # precondition
    assert isinstance(numbera, int) and (
        numbera >= 0
    ), "'number' must been from type int and positive"
    return numbera
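# Added illustration (values are my own): Euclid's algorithm iterates
# 48 % 18 = 12, 18 % 12 = 6, 12 % 6 = 0, so gcd(48, 18) == 6.
assert gcd(48, 18) == 6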
def kg_v(numbera, numberb):
    '''simple docstring'''
    assert (
        isinstance(numbera, int)
        and isinstance(numberb, int)
        and (numbera >= 1)
        and (numberb >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be return.
    # for kgV (x,1)
    if numbera > 1 and numberb > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_a = prime_factorization(numbera)
        prime_fac_b = prime_factorization(numberb)
    elif numbera == 1 or numberb == 1:
        prime_fac_a = []
        prime_fac_b = []
        ans = max(numbera, numberb)
    count_a = 0
    count_b = 0
    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_a:
        if n not in done:
            if n in prime_fac_b:
                count_a = prime_fac_a.count(n)
                count_b = prime_fac_b.count(n)
                for _ in range(max(count_a, count_b)):
                    ans *= n
            else:
                count_a = prime_fac_a.count(n)
                for _ in range(count_a):
                    ans *= n
            done.append(n)
    # iterates through primeFac2
    for n in prime_fac_b:
        if n not in done:
            count_b = prime_fac_b.count(n)
            for _ in range(count_b):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans
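# Added illustration (values are my own): kgV(8, 12) merges the factorizations
# [2, 2, 2] and [2, 2, 3] with the larger multiplicity per prime: 2^3 * 3 = 24.
assert kg_v(8, 12) == 24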
def get_prime(n):
    '''simple docstring'''
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"
    return ans
def get_primes_between(p_number_a, p_number_b):
    '''simple docstring'''
    assert (
        is_prime(p_number_a) and is_prime(p_number_b) and (p_number_a < p_number_b)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_a + 1  # jump to the next number
    ans = []  # this list will be returns.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_b:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_a
        and ans[len(ans) - 1] != p_number_b
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
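# Added example (bounds are my own): the primes strictly between 11 and 23.
assert get_primes_between(11, 23) == [13, 17, 19]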
def get_divisors(n):
    '''simple docstring'''
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans
def is_perfect_number(number):
    '''simple docstring'''
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def simplify_fraction(numerator, denominator):
    '''simple docstring'''
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
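# Added example (the fraction is my own): 27/18 reduces by gcd 9 to (3, 2).
assert simplify_fraction(27, 18) == (3, 2)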
def factorial(n):
    '''simple docstring'''
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be return.
    for factor in range(1, n + 1):
        ans *= factor
    return ans
def fib(n):
    '''simple docstring'''
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"
    tmp = 0
    fiba = 1
    ans = 1  # this will be return
    for _ in range(n - 1):
        tmp = ans
        ans += fiba
        fiba = tmp
    return ans
| 54 |
cache = {}
def _calculate(days: int, absent: int, late: int) -> int:
    '''simple docstring'''
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings
def solution(days: int = 30) -> int:
    '''simple docstring'''
    return _calculate(days, absent=0, late=0)
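# Added scale check (computed with the memoised recursion above): 43 of the
# 3^4 = 81 four-day strings qualify, the small case quoted in Project Euler 191.
assert solution(4) == 43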
if __name__ == "__main__":
print(solution())
| 339 | 0 |
'''simple docstring'''
def merge_sort(collection: list):
    def merge(left: list, right: list) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right
        return list(_merge())
    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
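# Added usage check (the list is my own): merging the two sorted halves is
# handled lazily by the nested generator.
assert merge_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]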
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
print(*merge_sort(unsorted), sep=""",""")
| 55 |
import os
import sys
import unittest
UpperCAmelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
UpperCAmelCase__ = os.path.join(git_repo_path, "src", "diffusers")
class __lowerCAmelCase ( unittest.TestCase ):
def _lowerCamelCase ( self : Tuple) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = find_backend(' if not is_torch_available():')
self.assertEqual(A , 'torch')
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
_UpperCAmelCase = find_backend(' if not (is_torch_available() and is_transformers_available()):')
self.assertEqual(A , 'torch_and_transformers')
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
_UpperCAmelCase = find_backend(
' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):')
self.assertEqual(A , 'torch_and_transformers_and_onnx')
def _lowerCamelCase ( self : int) -> Dict:
"""simple docstring"""
_UpperCAmelCase = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('torch' , A)
self.assertIn('torch_and_transformers' , A)
self.assertIn('flax_and_transformers' , A)
self.assertIn('torch_and_transformers_and_onnx' , A)
# Likewise, we can't assert on the exact content of a key
self.assertIn('UNet2DModel' , objects['torch'])
self.assertIn('FlaxUNet2DConditionModel' , objects['flax'])
self.assertIn('StableDiffusionPipeline' , objects['torch_and_transformers'])
self.assertIn('FlaxStableDiffusionPipeline' , objects['flax_and_transformers'])
self.assertIn('LMSDiscreteScheduler' , objects['torch_and_scipy'])
self.assertIn('OnnxStableDiffusionPipeline' , objects['torch_and_transformers_and_onnx'])
def _lowerCamelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = create_dummy_object('CONSTANT' , '\'torch\'')
self.assertEqual(A , '\nCONSTANT = None\n')
_UpperCAmelCase = create_dummy_object('function' , '\'torch\'')
self.assertEqual(
A , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n')
_UpperCAmelCase = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n'
_UpperCAmelCase = create_dummy_object('FakeClass' , '\'torch\'')
self.assertEqual(A , A)
def _lowerCamelCase ( self : Dict) -> int:
"""simple docstring"""
_UpperCAmelCase = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n'
_UpperCAmelCase = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']})
self.assertEqual(dummy_files['torch'] , A)
| 339 | 0 |
'''simple docstring'''
from __future__ import annotations
def is_palindrome(n) -> bool:
    '''simple docstring'''
    n = str(n)
    return n == n[::-1]
def solution(limit: int = 100_0000) -> int:
    '''simple docstring'''
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split('''b''' )[1]):
            total += i
    return total
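# Added worked example (the value is my own): 585 = 0b1001001001 reads the same
# both ways in base 10 and base 2, so it is one of the numbers counted above.
assert is_palindrome(585) and is_palindrome(bin(585).split("b")[1])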
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 56 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments :
UpperCamelCase = field(
default='''tab_fact''' , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
UpperCamelCase = field(
default='''tab_fact''' , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} , )
UpperCamelCase = field(
default=1_0_2_4 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of prediction examples to this '''
'''value if set.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''A csv or a json file containing the training data.'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''A csv or a json file containing the validation data.'''} )
UpperCamelCase = field(default=A , metadata={'''help''': '''A csv or a json file containing the test data.'''} )
def _lowerCamelCase ( self : str) -> List[Any]:
"""simple docstring"""
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.')
else:
_UpperCAmelCase = self.train_file.split('.')[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
_UpperCAmelCase = self.validation_file.split('.')[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments :
UpperCamelCase = field(
default=A , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
UpperCamelCase = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
def A ( ) -> Optional[int]:
'''simple docstring'''
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
_UpperCAmelCase = training_args.get_process_log_level()
logger.setLevel(_UpperCAmelCase )
datasets.utils.logging.set_verbosity(_UpperCAmelCase )
transformers.utils.logging.set_verbosity(_UpperCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
_UpperCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {'train': data_args.train_file, 'validation': data_args.validation_file}
        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split('.')[-1]
                test_extension = data_args.test_file.split('.')[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files['test'] = data_args.test_file
            else:
                raise ValueError('Need either a GLUE task or a test file for `do_predict`.')
        for key in data_files.keys():
            logger.info(f"load a local file for {key}: {data_files[key]}")
        if data_args.train_file.endswith('.csv'):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset('csv', data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset('json', data_files=data_files, cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
    label_list = raw_datasets['train'].features['label'].names
    num_labels = len(label_list)
    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, add_prefix_space=True, )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = 'max_length'
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
    # Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {'Refused': 0, 'Entailed': 1}
    model.config.id2label = {0: 'Refused', 1: 'Entailed'}
    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.")
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            _table_content = [_table_row.split('#') for _table_row in _table_text.strip('\n').split('\n')]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples['statement']
        tables = list(map(_convert_table_text_to_pandas, examples['table_text']))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)
        result['label'] = examples['label']
        return result

    with training_args.main_process_first(desc='dataset map pre-processing'):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc='Running tokenizer on dataset', )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
        train_dataset = raw_datasets['train']
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
        eval_dataset = raw_datasets['validation']
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('--do_predict requires a test dataset' )
        predict_dataset = raw_datasets['test']
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
# Log a few random samples from the training set:
if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None
# Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator, )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics['train_samples'] = min(max_train_samples, len(train_dataset))
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics('train', metrics)
        trainer.save_metrics('train', metrics)
        trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics['eval_samples'] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)
if training_args.do_predict:
logger.info('*** Predict ***' )
        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns('label')
        predictions = trainer.predict(predict_dataset, metric_key_prefix='predict').predictions
        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, 'predict_results_tabfact.txt')
        if trainer.is_world_process_zero():
            with open(output_predict_file, 'w') as writer:
                logger.info('***** Predict Results *****')
                writer.write('index\tprediction\n')
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
    kwargs = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
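
# A minimal, self-contained sketch (not in the original script) of the '#'-delimited
# TabFact table format handled by `_convert_table_text_to_pandas` above; the sample
# string is made up. Relies on the script's existing `pandas` import (`pd`).
def _demo_convert_table_text():
    _table_text = 'name#points\nalice#3\nbob#5\n'
    rows = [line.split('#') for line in _table_text.strip('\n').split('\n')]
    # First row is the header, remaining rows are the records.
    return pd.DataFrame.from_records(rows[1:], columns=rows[0])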
| 339 | 0 |
"""simple docstring"""
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance = builder_cls(
                cache_dir=tmp_dir, config_name=config_name, hash=dataset_module.hash, )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ])
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance = builder_cls(
        cache_dir=tmp_dir, config_name="20220301.frr", hash=dataset_module.hash, )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset(tmp_path):
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance = builder_cls(
        cache_dir=tmp_path, config_name="20220301.frr", hash=dataset_module.hash, )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
| 57 |
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program, timeout, task_id, completion_id):
    '''Runs the given program in a sandboxed subprocess and reports pass/fail.'''
    manager = multiprocessing.Manager()
    result = manager.list()
    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()
    if not result:
        result.append('timed out')
    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
def unsafe_execute(check_program, result, timeout):
    '''Executes the program inside the current process; only call from a subprocess.'''
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil
        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir
        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()
        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append('passed')
        except TimeoutException:
            result.append('timed out')
        except BaseException as e:
            result.append(f"failed: {e}")
        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException('Timed out!')

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)
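
# Hedged usage sketch: `time_limit` raises TimeoutException once the SIGALRM timer
# fires, cutting off long-running code. POSIX-only (relies on signal.SIGALRM).
def _demo_time_limit():
    try:
        with time_limit(0.1):
            while True:
                pass
    except TimeoutException:
        return 'timed out'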
@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield
@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname
class TimeoutException(Exception):
    pass
class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it's read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns True if the IO object can be read."""
        return False
class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = 'stdin'
@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)
def reliability_guard(maximum_memory_bytes=None):
    '''Disables destructive functions before running untrusted code.'''
    if maximum_memory_bytes is not None:
        import resource
        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))
    faulthandler.disable()
    import builtins
    builtins.exit = None
    builtins.quit = None
    import os
    os.environ['OMP_NUM_THREADS'] = '1'
    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.fchdir = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None
    import shutil
    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None
    import subprocess
    subprocess.Popen = None  # type: ignore
    __builtins__['help'] = None
    import sys
    sys.modules['ipdb'] = None
    sys.modules['joblib'] = None
    sys.modules['resource'] = None
    sys.modules['psutil'] = None
    sys.modules['tkinter'] = None
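
# Hedged usage sketch of the entry point above; the program string and ids are made up.
# `check_correctness` runs the program in a separate process with a hard time limit.
def _demo_check_correctness():
    program = "assert 1 + 1 == 2"
    return check_correctness(program, 3.0, "demo/0", 0)  # expected: result == "passed"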
| 339 | 0 |
'''simple docstring'''
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]
def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))
def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))
def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))
def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix
def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix
def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix
def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)
if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
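
# Quick self-check (an addition, not in the original): rotating 90 degrees twice
# must agree with rotating 180 degrees, since none of the helpers mutate their input.
def _check_rotation_identity() -> bool:
    m = make_matrix(3)
    return rotate_90(rotate_90(m)) == rotate_180(m)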
| 58 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
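
# Assumed usage sketch: flags such as RUN_SLOW are read once at import time.
# The environment-variable name below is hypothetical.
def _demo_parse_flag():
    os.environ['MY_FAKE_FLAG'] = 'yes'
    return parse_flag_from_env('MY_FAKE_FLAG', default=False)  # strtobool('yes') -> 1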
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    return unittest.skip('Test was skipped')(test_case)
def slow(test_case):
    return unittest.skipUnless(_run_slow_tests, 'test is slow')(test_case)
def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), 'test requires only a CPU')(test_case)
def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), 'test requires a GPU')(test_case)
def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), 'test requires a XPU')(test_case)
def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), 'test requires a `mps` backend support in `torch`')(test_case)
def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), 'test requires the Hugging Face suite')(test_case)
def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), 'test requires the bitsandbytes library')(test_case)
def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), 'test requires TPU')(test_case)
def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, 'test requires a GPU')(test_case)
def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, 'test requires a XPU')(test_case)
def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, 'test requires multiple GPUs')(test_case)
def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, 'test requires multiple XPUs')(test_case)
def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), 'test requires safetensors')(test_case)
def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), 'test requires DeepSpeed')(test_case)
def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version('>=', '1.12.0'), 'test requires torch version >= 1.12.0')(test_case)
def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version('>=', version), f"test requires torch version >= {version}")(test_case)
def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), 'test requires Tensorboard')(test_case)
def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), 'test requires wandb')(test_case)
def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), 'test requires comet_ml')(test_case)
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available, 'test requires at least one tracker to be available and for `comet_ml` to not be installed', )(test_case)
class TempDirTestCase(unittest.TestCase):
    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob('**/*'):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)
class AccelerateTestCase(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class MockingTestCase(unittest.TestCase):
    def add_mocks(self, mocks: Union[mock.Mock, List[mock.Mock]]):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print('\nRunning: ', ' '.join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env, )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode('utf-8').rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label='stdout:'))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label='stderr:'))),
        ], timeout=timeout, )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))
    cmd_str = ' '.join(cmd)
    if result.returncode > 0:
        stderr = '\n'.join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}")
    return result
class SubprocessCallException(Exception):
    pass
def run_command(command, return_stdout=False):
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, 'decode'):
                output = output.decode('utf-8')
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}") from e
| 339 | 0 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(
            prompt, init_image, mask_image)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)
        output = pipeline(
            prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True)
        images = output.images.reshape(num_samples, 512, 512, 3)
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 59 |
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    '''Checks that `n` can be placed at (row, column) without a conflict.'''
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    '''Returns the coordinates of the first empty cell, or None if full.'''
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None
def sudoku(grid: Matrix) -> Matrix | None:
    '''Solves the grid in place via backtracking; returns None if unsolvable.'''
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
    return None
def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=' ')
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
    solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
| 339 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class Data2VecTextConfig(PretrainedConfig):
    model_type = 'data2vec-text'

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ])
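
# Hedged usage sketch: the sizes below are arbitrary illustration values,
# not those of any released checkpoint.
def _demo_config():
    config = Data2VecTextConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
    return config.model_type  # 'data2vec-text'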
| 60 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
UpperCAmelCase__ = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
UpperCAmelCase__ = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
UpperCAmelCase__ = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Value('string', id='sequence'),
                }), codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'], reference_urls=[
                'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
                'https://en.wikipedia.org/wiki/METEOR',
            ], )

    def _download_and_prepare(self, dl_manager):
        import nltk
        nltk.download('wordnet')
        if NLTK_VERSION >= version.Version('3.6.5'):
            nltk.download('punkt')
        if NLTK_VERSION >= version.Version('3.6.6'):
            nltk.download('omw-1.4')

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version('3.6.5'):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]
        return {"meteor": np.mean(scores)}
| 339 | 0 |
"""simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
    # TapasConfig to False.
    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"""Task {task} not supported.""")
    print(f"""Building PyTorch model from configuration: {config}""")
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)
    # Save pytorch-model (weights and configuration)
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    model.save_pretrained(pytorch_dump_path)
    # Save tokenizer files
    print(f"""Save tokenizer files to {pytorch_dump_path}""")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)
    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
help='Whether to use relative position embeddings or not. Defaults to True.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
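
# Hedged example invocation (script name and paths below are placeholders):
#   python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#       --task WTQ \
#       --reset_position_index_per_cell \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --tapas_config_file /path/to/tapas_config.json \
#       --pytorch_dump_path /path/to/output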
| 61 |
import argparse
import hashlib
import io
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
"tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
"tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
"base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
"base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
"small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
"small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
"medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
"medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
"large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
"large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}
def remove_ignore_keys_(state_dict):
    ignore_keys = ['layers', 'blocks']
    for k in ignore_keys:
        state_dict.pop(k, None)
WHISPER_MAPPING = {
"blocks": "layers",
"mlp.0": "fc1",
"mlp.2": "fc2",
"mlp_ln": "final_layer_norm",
".attn.query": ".self_attn.q_proj",
".attn.key": ".self_attn.k_proj",
".attn.value": ".self_attn.v_proj",
".attn_ln": ".self_attn_layer_norm",
".attn.out": ".self_attn.out_proj",
".cross_attn.query": ".encoder_attn.q_proj",
".cross_attn.key": ".encoder_attn.k_proj",
".cross_attn.value": ".encoder_attn.v_proj",
".cross_attn_ln": ".encoder_attn_layer_norm",
".cross_attn.out": ".encoder_attn.out_proj",
"decoder.ln.": "decoder.layer_norm.",
"encoder.ln.": "encoder.layer_norm.",
"token_embedding": "embed_tokens",
"encoder.positional_embedding": "encoder.embed_positions.weight",
"decoder.positional_embedding": "decoder.embed_positions.weight",
"ln_post": "layer_norm",
}
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)
        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url: str, root: str) -> bytes:
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split('/')[-2]
    download_target = os.path.join(root, filename)
    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")
    if os.path.isfile(download_target):
        model_bytes = open(download_target, 'rb').read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
    with urllib.request.urlopen(url) as source, open(download_target, 'wb') as output:
        with tqdm(
            total=int(source.info().get('Content-Length')), ncols=80, unit='iB', unit_scale=True, unit_divisor=1024) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))
    model_bytes = open(download_target, 'rb').read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            'Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.')
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        # Cache root assumed to be the current directory, since the original call site
        # did not specify one; the checkpoint is then loaded from the raw bytes.
        model_bytes = _download(_MODELS[checkpoint_path], '.')
        original_checkpoint = torch.load(io.BytesIO(model_bytes), map_location='cpu')
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location='cpu')
    dimensions = original_checkpoint['dims']
    state_dict = original_checkpoint['model_state_dict']
    proj_out_weights = state_dict['decoder.token_embedding.weight']
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict['decoder.layers.0.fc1.weight'].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions['n_vocab'], encoder_ffn_dim=ffn_dim, decoder_ffn_dim=ffn_dim, num_mel_bins=dimensions['n_mels'], d_model=dimensions['n_audio_state'], max_target_positions=dimensions['n_text_ctx'], encoder_layers=dimensions['n_audio_layer'], encoder_attention_heads=dimensions['n_audio_head'], decoder_layers=dimensions['n_text_layer'], decoder_attention_heads=dimensions['n_text_head'], max_source_positions=dimensions['n_audio_ctx'], )
    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
            f" but all the following weights are missing {missing}")
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# # Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Patht to the downloaded checkpoints")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
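
# Hedged example invocation (script name assumed; the checkpoint must be a key of
# `_MODELS`, e.g. "tiny.en", or a local .pt path):
#   python convert_openai_whisper_to_hf.py --checkpoint_path tiny.en --pytorch_dump_folder_path ./whisper-tiny.en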
| 339 | 0 |
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
def should_reduce_batch_size(exception: Exception) -> bool:
    _statements = [
        'CUDA out of memory.',  # CUDA OOM
        'cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.',  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False
def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)
    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ', '.join([f'{arg}={value}' for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f'Batch size was passed into `{function.__name__}` as the first argument when called.'
                f'Remove this as the decorator already does so: `{function.__name__}({arg_str})`')
        while True:
            if batch_size == 0:
                raise RuntimeError('No executable batch size found, reached zero.')
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise
    return decorator
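
# Assumed usage sketch: the decorated function receives the current batch size as its
# first argument and is retried with half the batch size after an OOM-like failure.
@find_executable_batch_size(starting_batch_size=128)
def _demo_training_step(batch_size):
    # A real loop would build dataloaders and models from `batch_size` here.
    return batch_size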
| 62 |
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """Builder Config for AudioFolder."""
    drop_labels: bool = None
    drop_metadata: bool = None
class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = 'audio'
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column='audio', label_column='label')
AUDIO_EXTENSIONS = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
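
# Hedged usage sketch: this builder is what `load_dataset("audiofolder", ...)` resolves to.
# The directory layout below (one subfolder per label) is the conventional one:
#   from datasets import load_dataset
#   ds = load_dataset("audiofolder", data_dir="/path/to/folder")  # folder/<label>/<file>.wav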
| 339 | 0 |
'''simple docstring'''
from functools import reduce
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution(n: str = N) -> int:
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12))
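
# Equivalent explicit-loop sketch (an addition): the same sliding 13-digit window
# without `reduce`, which some readers may find easier to follow.
def solution_iterative(n: str = N) -> int:
    best = 0
    for i in range(len(n) - 12):
        product = 1
        for digit in n[i : i + 13]:
            product *= int(digit)
        best = max(best, product)
    return best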
if __name__ == "__main__":
print(f"""{solution() = }""")
| 63 |
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, tempa
                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start]))
                self.set_position(positions[start], temp)
                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    heap = Heap()
    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)
    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
UpperCAmelCase__ = int(input("Enter number of edges: ").strip())
UpperCAmelCase__ = defaultdict(list)
for _ in range(edges_number):
UpperCAmelCase__ = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
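# --- Added usage sketch (not in the original script): exercise prisms_algorithm
# without stdin; the assumed input format is adjacency_list[u] = [[v, weight], ...]
# with every edge recorded in both directions.
def demo_prisms_algorithm() -> None:
    demo_graph = defaultdict(list)
    for u, v, w in [(0, 1, 1), (0, 2, 3), (1, 2, 1), (1, 3, 4), (2, 3, 1)]:
        demo_graph[u].append([v, w])
        demo_graph[v].append([u, w])
    # The minimum spanning tree should use the three weight-1 edges.
    print(prisms_algorithm(demo_graph))  # e.g. [(0, 1), (1, 2), (2, 3)]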
| 339 | 0 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
A_ = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 25_00_04
RO_CODE = 25_00_20
@require_sentencepiece
@require_tokenizers
class lowercase( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = MBartaaTokenizer
    rust_tokenizer_class = MBartaaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp( self: str ):
        '''simple docstring'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = MBartaaTokenizer(A_, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )

    def test_convert_token_and_id( self: Optional[Any] ):
        '''simple docstring'''
        token = "<s>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ), token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ), token )

    def test_get_vocab( self: List[str] ):
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0], "<s>" )
        self.assertEqual(vocab_keys[1], "<pad>" )
        self.assertEqual(vocab_keys[-1], "<mask>" )
        self.assertEqual(len(vocab_keys ), 1_054 )

    def test_vocab_size( self: str ):
        '''simple docstring'''
        self.assertEqual(self.get_tokenizer().vocab_size, 1_054 )

    def test_full_tokenizer( self: Tuple ):
        '''simple docstring'''
        tokenizer = MBartaaTokenizer(A_, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True )
        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."], )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids, [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ], )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."], )
    @slow
    def test_tokenizer_integration( self: Any ):
        '''simple docstring'''
        # fmt: off
_snake_case : Union[str, Any] = {"""input_ids""": [[250_004, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [250_004, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250_004, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_snake_case, model_name="facebook/mbart-large-50", revision="d3913889c59cd5c9e456b269c376325eabad57e2", )
    def test_save_pretrained( self: Any ):
        '''simple docstring'''
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs )
                tmpdirname = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key ) )
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname )
                # Save tokenizer rust, legacy_format=True
                tmpdirname = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname, legacy_format=True )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname )
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key ) )
                shutil.rmtree(tmpdirname )
                # Save tokenizer rust, legacy_format=False
                tmpdirname = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname, legacy_format=False )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key ) )
                shutil.rmtree(tmpdirname )
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartaaOneToManyIntegrationTest( unittest.TestCase ):
'''simple docstring'''
    checkpoint_name = "facebook/mbart-large-50-one-to-many-mmt"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [EN_CODE, 82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2]

    @classmethod
    def setUpClass( cls: Optional[Any] ):
        '''simple docstring'''
        cls.tokenizer: MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO" )
        cls.pad_token_id = 1
        return cls
    def check_language_codes( self: int ):
        '''simple docstring'''
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250_001 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250_004 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250_020 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"], 250_038 )

    def test_enro_tokenizer_batch_encode_plus( self: List[str] ):
        '''simple docstring'''
        ids = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids )

    def test_enro_tokenizer_decode_ignores_language_codes( self: Any ):
        '''simple docstring'''
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids )
        generated_ids = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True )
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True )
        self.assertEqual(result, expected_romanian )
        self.assertNotIn(self.tokenizer.eos_token, result )

    def test_enro_tokenizer_truncation( self: Tuple ):
        '''simple docstring'''
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str )
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True ).input_ids[0]
        self.assertEqual(ids[0], EN_CODE )
        self.assertEqual(ids[-1], 2 )
        self.assertEqual(len(ids ), desired_max_length )

    def test_mask_token( self: Tuple ):
        '''simple docstring'''
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ), [250_053, 250_001] )

    def test_special_tokens_unaffacted_by_save_load( self: Dict ):
        '''simple docstring'''
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname )
        new_tok = MBartaaTokenizer.from_pretrained(tmpdirname )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens )

    @require_torch
    def test_batch_fairseq_parity( self: Optional[Any] ):
        '''simple docstring'''
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt" )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id )
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]

    @require_torch
    def test_tokenizer_prepare_batch( self: int ):
        '''simple docstring'''
        batch = self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens ), return_tensors="pt", )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id )
        self.assertIsInstance(batch, BatchEncoding )
        self.assertEqual((2, 14), batch.input_ids.shape )
        self.assertEqual((2, 14), batch.attention_mask.shape )
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result )
        self.assertEqual(2, batch.decoder_input_ids[0, 0] )  # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE] )
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id] )

    def test_seq2seq_max_target_length( self: Union[str, Any] ):
        '''simple docstring'''
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt" )
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt" )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id )
        self.assertEqual(batch.input_ids.shape[1], 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1], 10 )

    @require_torch
    def test_tokenizer_translation( self: Dict ):
        '''simple docstring'''
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR" )
        self.assertEqual(
            nested_simplify(inputs ), {
                # en_XX, A, test, EOS
                "input_ids": [[250_004, 62, 3_034, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250_001,
            }, )
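# --- Added usage sketch (not in the original tests). The classes above are this
# corpus' renamed stand-ins; with the real library names the translation inputs are
# built like this (downloads the tokenizer files on first use):
#
#   from transformers import MBart50Tokenizer
#
#   tok = MBart50Tokenizer.from_pretrained(
#       "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO")
#   batch = tok(["UN Chief Says There Is No Military Solution in Syria"], return_tensors="pt")
#   # input_ids begin with the en_XX language code (250004) and end with </s> (2),
#   # mirroring the assertions in test_batch_fairseq_parity above.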
| 64 |
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask ( masked_input : str , model : Any , tokenizer : Any , topk : int = 5 ) -> List[Any]:
    '''simple docstring'''
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count('<mask>' ) == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input , add_special_tokens=True ) ).unsqueeze(0 )  # Batch size 1
    logits = model(input_ids )[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0 )
    values , indices = prob.topk(k=topk , dim=0 )
    topk_predicted_token_bpe = ' '.join(
        [tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(indices ) )] )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(' ' ) ):
        predicted_token = predicted_token_bpe.replace('\u2581' , ' ' )
        if " {0}".format(masked_token ) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(' {0}'.format(masked_token ) , predicted_token ),
                    values[index].item(),
                    predicted_token,
                ) )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token , predicted_token ),
                    values[index].item(),
                    predicted_token,
                ) )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()
masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 339 | 0 |
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHAaHash :
    def __init__(self : Optional[Any] , data : bytes ) -> None:
        """simple docstring"""
        self.data = data
        self.h = [0X67_452_301, 0XEF_CDA_B89, 0X98_BAD_CFE, 0X10_325_476, 0XC3_D2E_1F0]

    @staticmethod
    def rotate(n : int , b : int ) -> int:
        # Left-rotate the 32-bit integer n by b bits.
        return ((n << b) | (n >> (3_2 - b))) & 0XFF_FFF_FFF

    def padding(self : Optional[int] ) -> bytes:
        # Pad the message so its length is a multiple of 64 bytes (512 bits).
        padding = b"\x80" + b"\x00" * (6_3 - (len(self.data ) + 8) % 6_4)
        padded_data = self.data + padding + struct.pack(">Q" , 8 * len(self.data ) )
        return padded_data

    def split_blocks(self : List[str] ) -> List[Any]:
        # Split the padded message into 64-byte blocks.
        return [
            self.padded_data[i : i + 6_4] for i in range(0 , len(self.padded_data ) , 6_4 )
        ]

    def expand_block(self : Union[str, Any] , block : Any ) -> Optional[Any]:
        # Unpack a 64-byte block into 16 words and expand to the 80-word schedule.
        w = list(struct.unpack(">16L" , block ) ) + [0] * 6_4
        for i in range(1_6 , 8_0 ):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 1_4] ^ w[i - 1_6]) , 1 )
        return w

    def final_hash(self : Dict ) -> str:
        # Pad, split into blocks and run the 80-round compression for each block.
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block )
            a , b , c , d , e = self.h
            for i in range(0 , 8_0 ):
                if 0 <= i < 2_0:
                    f = (b & c) | ((~b) & d)
                    k = 0X5A_827_999
                elif 2_0 <= i < 4_0:
                    f = b ^ c ^ d
                    k = 0X6E_D9E_BA1
                elif 4_0 <= i < 6_0:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0X8F_1BB_CDC
                elif 6_0 <= i < 8_0:
                    f = b ^ c ^ d
                    k = 0XCA_62C_1D6
                a , b , c , d , e = (
                    self.rotate(a , 5 ) + f + e + k + expanded_block[i] & 0XFF_FFF_FFF,
                    a,
                    self.rotate(b , 3_0 ),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0XFF_FFF_FFF,
                self.h[1] + b & 0XFF_FFF_FFF,
                self.h[2] + c & 0XFF_FFF_FFF,
                self.h[3] + d & 0XFF_FFF_FFF,
                self.h[4] + e & 0XFF_FFF_FFF,
            )
        return ("{:08x}" * 5).format(*self.h )


def test_shaa_hash() -> None:
    '''simple docstring'''
    msg = b"Test String"
    assert SHAaHash(msg ).final_hash() == hashlib.sha1(msg ).hexdigest()  # noqa: S324


def main() -> None:
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="Process some strings or files" )
    parser.add_argument(
        "--string", dest="input_string", default="Hello World!! Welcome to Cryptography", help="Hash the string", )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file" )
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb" ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8" )
    print(SHAaHash(hash_input ).final_hash() )


if __name__ == "__main__":
    main()
    import doctest
    doctest.testmod()
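# --- Added usage sketch (not in the original script): hash a known test vector and
# cross-check against hashlib, in the same spirit as test_shaa_hash above.
def demo_shaa() -> None:
    message = b"abc"
    digest = SHAaHash(message).final_hash()
    print(digest)  # expected: a9993e364706816aba3e25717850c26c9cd0d89d
    assert digest == hashlib.sha1(message).hexdigest()  # noqa: S324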
| 65 |
import math
import unittest
def A ( _UpperCAmelCase : int ) -> bool:
'''simple docstring'''
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(_UpperCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
class __lowerCAmelCase ( unittest.TestCase ):
def _lowerCamelCase ( self : Tuple) -> Union[str, Any]:
"""simple docstring"""
self.assertTrue(is_prime(2))
self.assertTrue(is_prime(3))
self.assertTrue(is_prime(5))
self.assertTrue(is_prime(7))
self.assertTrue(is_prime(11))
self.assertTrue(is_prime(13))
self.assertTrue(is_prime(17))
self.assertTrue(is_prime(19))
self.assertTrue(is_prime(23))
self.assertTrue(is_prime(29))
def _lowerCamelCase ( self : Optional[int]) -> Any:
"""simple docstring"""
with self.assertRaises(A):
is_prime(-19)
self.assertFalse(
is_prime(0) , 'Zero doesn\'t have any positive factors, primes must have exactly two.' , )
self.assertFalse(
is_prime(1) , 'One only has 1 positive factor, primes must have exactly two.' , )
self.assertFalse(is_prime(2 * 2))
self.assertFalse(is_prime(2 * 3))
self.assertFalse(is_prime(3 * 3))
self.assertFalse(is_prime(3 * 5))
self.assertFalse(is_prime(3 * 5 * 7))
if __name__ == "__main__":
unittest.main()
| 339 | 0 |
"""simple docstring"""
from math import factorial
def combinations ( n: int, k: int ):
    '''simple docstring'''
    if n < k or k < 0:
        raise ValueError("""Please enter positive integers for n and k where n >= k""" )
    return factorial(n ) // (factorial(k ) * factorial(n - k ))
if __name__ == "__main__":
print(
"The number of five-card hands possible from a standard",
F"""fifty-two card deck is: {combinations(52, 5)}\n""",
)
print(
"If a class of 40 students must be arranged into groups of",
F"""4 for group projects, there are {combinations(40, 4)} ways""",
"to arrange them.\n",
)
print(
"If 10 teams are competing in a Formula One race, there",
F"""are {combinations(10, 3)} ways that first, second and""",
"third place can be awarded.",
)
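# --- Added sanity check (not in the original): Pascal's rule,
# C(n, k) == C(n - 1, k - 1) + C(n - 1, k), which the factorial formula satisfies
# for 0 < k < n.
def check_pascals_rule(n: int, k: int) -> bool:
    return combinations(n, k) == combinations(n - 1, k - 1) + combinations(n - 1, k)

# check_pascals_rule(52, 5)  # -> True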
| 66 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
_DESCRIPTION = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
_KWARGS_DESCRIPTION = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
    def _info( self : str) -> MetricInfo:
        """simple docstring"""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    'predictions': datasets.Sequence(datasets.Value('string' , id='token') , id='sequence'),
                    'references': datasets.Sequence(
                        datasets.Sequence(datasets.Value('string' , id='token') , id='sequence') , id='references'),
                }) , )

    def _compute( self : Union[str, Any] , predictions : List[List[str]] , references : List[List[List[str]]] , min_len : int = 1 , max_len : int = 4 , ) -> Dict[str, float]:
        """simple docstring"""
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references , hypotheses=predictions , min_len=min_len , max_len=max_len)
        }
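# --- Added usage sketch (not part of the metric file): the Metric above is a thin
# wrapper around NLTK's corpus_gleu, which can also be called directly:
#
#   from nltk.translate import gleu_score
#
#   hypotheses = [["the", "cat", "sat"]]
#   references = [[["the", "cat", "sat", "down"]]]
#   score = gleu_score.corpus_gleu(
#       list_of_references=references, hypotheses=hypotheses, min_len=1, max_len=4)
#   # score lies in [0, 1]; the metric returns it as {"google_bleu": score}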
| 339 | 0 |
'''simple docstring'''
from __future__ import annotations
def decrypt_caesar_with_chi_squared( ciphertext , cipher_alphabet = None , frequencies_dict = None , case_sensitive = False , ) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i ) for i in range(97 , 1_23 )]
    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
'''a''': 0.0_8_4_9_7,
'''b''': 0.0_1_4_9_2,
'''c''': 0.0_2_2_0_2,
'''d''': 0.0_4_2_5_3,
'''e''': 0.1_1_1_6_2,
'''f''': 0.0_2_2_2_8,
'''g''': 0.0_2_0_1_5,
'''h''': 0.0_6_0_9_4,
'''i''': 0.0_7_5_4_6,
'''j''': 0.0_0_1_5_3,
'''k''': 0.0_1_2_9_2,
'''l''': 0.0_4_0_2_5,
'''m''': 0.0_2_4_0_6,
'''n''': 0.0_6_7_4_9,
'''o''': 0.0_7_5_0_7,
'''p''': 0.0_1_9_2_9,
'''q''': 0.0_0_0_9_5,
'''r''': 0.0_7_5_8_7,
'''s''': 0.0_6_3_2_7,
'''t''': 0.0_9_3_5_6,
'''u''': 0.0_2_7_5_8,
'''v''': 0.0_0_9_7_8,
'''w''': 0.0_2_5_6_0,
'''x''': 0.0_0_1_5_0,
'''y''': 0.0_1_9_9_4,
'''z''': 0.0_0_0_7_7,
}
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict
    if not case_sensitive:
        ciphertext = ciphertext.lower()
    # Chi squared statistic values
    chi_squared_statistic_values = {}
    # cycle through all of the shifts
    for shift in range(len(alphabet_letters ) ):
        decrypted_with_shift = ""
        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower() ) - shift) % len(
                    alphabet_letters )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter
        chi_squared_statistic = 0.0
        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter )
                    # Get the excepcted amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter )
                    # Get the excepcted amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )
    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int ) -> tuple[float, str]:
        return chi_squared_statistic_values[key]
    most_likely_cipher = min(
        chi_squared_statistic_values , key=chi_squared_statistic_values_sorting_key , )
    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
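# --- Added usage sketch (not in the original): recovering a Caesar shift. The chi
# squared attack should typically find shift 13 for this ROT13-encrypted sample.
def demo_decrypt_caesar() -> None:
    # "uryyb guvf vf n grfg" is "hello this is a test" shifted by 13 in a-z
    shift, chi_value, decoded = decrypt_caesar_with_chi_squared("uryyb guvf vf n grfg")
    print(shift, round(chi_value, 3), decoded)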
| 67 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
MTaTokenizerFast = TaTokenizerFast
_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mt5"] = [
"MT5EncoderModel",
"MT5ForConditionalGeneration",
"MT5ForQuestionAnswering",
"MT5Model",
"MT5PreTrainedModel",
"MT5Stack",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
    sys.modules[__name__] = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast},
module_spec=__spec__,
)
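# --- Added illustration (not in the original file): the deferred-import behaviour of
# _LazyModule can be reproduced in plain Python with PEP 562's module-level
# __getattr__; a hypothetical minimal sketch:
#
#   # mypackage/__init__.py
#   import importlib
#
#   _LAZY = {"heavy_fn": ".heavy_module"}  # attribute name -> submodule
#
#   def __getattr__(name):
#       if name in _LAZY:
#           module = importlib.import_module(_LAZY[name], __name__)
#           return getattr(module, name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")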
| 339 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""google/switch-base-8""": """https://huggingface.co/google/switch-base-8/blob/main/config.json""",
}
class a__ ( PretrainedConfig ):
    """simple docstring"""
    model_type = 'switch_transformers'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
    def __init__( self , vocab_size=32128 , d_model=768 , d_kv=64 , d_ff=2048 , expert_capacity=64 , num_layers=12 , num_sparse_encoder_layers=3 , num_decoder_layers=12 , num_sparse_decoder_layers=3 , num_heads=12 , num_experts=8 , router_bias=False , router_jitter_noise=0.01 , router_dtype="float32" , router_ignore_padding_tokens=False , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1e-6 , router_z_loss_coef=0.001 , router_aux_loss_coef=0.001 , initializer_factor=1.0 , feed_forward_proj="relu" , is_encoder_decoder=True , add_router_probs=False , use_cache=True , pad_token_id=0 , eos_token_id=1 , **kwargs , ) -> Tuple:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers
        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers
        # This tells us, each how many decoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(F'`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}' )
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        act_info = self.feed_forward_proj.split("-" )
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
            raise ValueError(
                F'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'" )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , **kwargs , )
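# --- Added usage sketch (not in the original file): constructing the config computes
# the derived sparse-layer step, e.g. 12 layers with 3 sparse encoder layers
# places a sparse MoE layer every 4th layer:
#
#   config = a__(num_layers=12, num_sparse_encoder_layers=3)
#   assert config.encoder_sparse_step == 4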
| 68 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class __lowerCAmelCase ( PretrainedConfig ):
    model_type = '''open-llama'''
    def __init__( self : str , vocab_size : List[Any]=10_00_00 , hidden_size : Tuple=40_96 , intermediate_size : Tuple=1_10_08 , num_hidden_layers : List[str]=32 , num_attention_heads : Tuple=32 , hidden_act : Optional[Any]="silu" , max_position_embeddings : int=20_48 , initializer_range : Optional[Any]=0.0_2 , rms_norm_eps : Dict=1E-6 , use_cache : Optional[Any]=True , pad_token_id : List[Any]=0 , bos_token_id : Dict=1 , eos_token_id : int=2 , tie_word_embeddings : Dict=False , use_memory_efficient_attention : Optional[int]=True , hidden_dropout_prob : List[Any]=0.1 , attention_dropout_prob : str=0.1 , use_stable_embedding : Dict=True , shared_input_output_embedding : Optional[Any]=True , rope_scaling : Dict=None , **kwargs : Union[str, Any] , ) -> Dict:
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # NB: the misspelled legacy kwarg name is kept for backward compatibility
        self.use_memory_efficient_attention = kwargs.pop(
            'use_memorry_efficient_attention' , use_memory_efficient_attention)
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )

    def _rope_scaling_validation( self : List[str]) -> None:
        """simple docstring"""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                F"got {self.rope_scaling}")
        rope_scaling_type = self.rope_scaling.get('type' , None)
        rope_scaling_factor = self.rope_scaling.get('factor' , None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float) or rope_scaling_factor <= 1.0:
            raise ValueError(F"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
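# --- Added usage sketch (not in the original file): the rope_scaling validation above
# accepts {"type": "linear" | "dynamic", "factor": float > 1.0} and rejects the rest:
#
#   __lowerCAmelCase(rope_scaling={"type": "linear", "factor": 2.0})  # passes
#   __lowerCAmelCase(rope_scaling={"type": "linear", "factor": 0.5})  # raises ValueError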
| 339 | 0 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester :
    def __init__( self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, hidden_sizes=[32, 64, 128], depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8, out_features=["stage1", "stage2"], out_indices=[1, 2], ) -> Optional[Any]:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self) -> Dict:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self) -> int:
        return FocalNetConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, hidden_sizes=self.hidden_sizes, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices, )
    def create_and_check_model(self, config, pixel_values, labels) -> List[Any]:
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels) -> List[Any]:
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])
        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels) -> List[str]:
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels) -> Optional[int]:
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self) -> Any:
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> Any:
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)

    def test_config(self) -> Dict:
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self) -> List[str]:
        return
    def test_model(self) -> Tuple:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self) -> int:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self) -> List[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self) -> List[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason='FocalNet does not use inputs_embeds')
    def test_inputs_embeds(self) -> Optional[Any]:
        pass

    @unittest.skip(reason='FocalNet does not use feedforward chunking')
    def test_feed_forward_chunking(self) -> int:
        pass

    def test_model_common_attributes(self) -> List[Any]:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self) -> str:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size) -> Any:
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester, 'expected_num_hidden_layers', len(self.model_tester.depths) + 1)
        self.assertEqual(len(hidden_states), expected_num_layers)
        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim], )
        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)
        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]), [num_patches, self.model_tester.embed_dim], )
    def test_hidden_states_output(self) -> Tuple:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes[:-1]:
            inputs_dict['output_hidden_states'] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self) -> Optional[Any]:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes[:-1]:
            inputs_dict['output_hidden_states'] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @slow
    def test_model_from_pretrained(self) -> List[str]:
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self) -> Union[str, Any]:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f'Parameter {name} of model {model_class} seems not properly initialized', )
@require_vision
@require_torch
class UpperCamelCase ( unittest.TestCase ):
@cached_property
    def default_image_processor( self) -> Union[str, Any]:
# TODO update organization
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny') if is_vision_available() else None
@slow
    def test_inference_image_classification_head( self) -> str:
        model = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny').to(torch_device)
        image_processor = self.default_image_processor
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)
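        # (ImageNet-1k class index 281 is "tabby, tabby cat", the label expected here)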
@require_torch
class UpperCamelCase ( BackboneTesterMixin , unittest.TestCase ):  # BackboneTesterMixin assumed imported in the elided header of this fragment
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig
    has_attentions = False
    def setUp( self) -> int:
        self.model_tester = FocalNetModelTester(self)
| 69 |
def validate_initial_digits(credit_card_number: str) -> bool:
    '''simple docstring'''
    return credit_card_number.startswith(('34', '35', '37', '4', '5', '6') )
def luhn_validation(credit_card_number: str) -> bool:
    '''simple docstring'''
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number ) - 2
    for i in range(half_len , -1 , -2 ):
        # double the value of every second digit
        digit = int(cc_number[i] )
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit ) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number ) - 1 , -1 , -2 ):
        total += int(cc_number[i] )
    return total % 10 == 0
def validate_credit_card_number(credit_card_number: str) -> bool:
    '''simple docstring'''
    error_message = F"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(F"{error_message} it has nonnumerical characters." )
        return False
    if not 13 <= len(credit_card_number ) <= 16:
        print(F"{error_message} of its length." )
        return False
    if not validate_initial_digits(credit_card_number ):
        print(F"{error_message} of its first two digits." )
        return False
    if not luhn_validation(credit_card_number ):
        print(F"{error_message} it fails the Luhn check." )
        return False
    print(F"{credit_card_number} is a valid credit card number." )
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("4111111111111111")
validate_credit_card_number("32323")
| 339 | 0 |
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process( position , value , l_send , r_send , lr_cv , rr_cv , result_pipe ):
    """simple docstring"""
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0 , 10 ):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value )
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value , temp )
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value )
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value , temp )
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value )
def odd_even_transposition( arr ):
    """simple docstring"""
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe() )
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
    temp_ls = temp_rs
    temp_lr = temp_rr
    for i in range(1 , len(arr ) - 1 ):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
        temp_ls = temp_rs
        temp_lr = temp_rr
    process_array_.append(
        Process(
            target=oe_process , args=(
                len(arr ) - 1,
                arr[len(arr ) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr ) - 1],
            ) , ) )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0 , len(arr ) ):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
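# For intuition, a minimal single-process sketch of the same odd-even
# comparison schedule (an illustrative helper added here; it is not used by
# the pipe-based implementation above):
def odd_even_transposition_sequential(arr):
    for phase in range(len(arr)):
        # even phases compare pairs (0,1), (2,3), ...; odd phases (1,2), (3,4), ...
        for i in range(phase % 2, len(arr) - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr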
def main():
    """simple docstring"""
    arr = list(range(10 , 0 , -1 ) )
    print("""Initial List""" )
    print(*arr )
    arr = odd_even_transposition(arr )
    print("""Sorted List\n""" )
    print(*arr )
if __name__ == "__main__":
main()
| 70 |
from functools import reduce
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def solution(n: str = N ) -> int:
    '''simple docstring'''
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x , y : str(int(x ) * int(y ) ) , n[i : i + 13] ) )
        for i in range(len(n ) - 12 ) )
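# For reference, the value this solution is expected to produce in its
# upstream test suite is 23514624000, the product of the thirteen-digit run
# "5576689664895" (stated here for orientation, not verified in this file).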
if __name__ == "__main__":
print(f"""{solution() = }""")
| 339 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __A ( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase__ : Tuple =CycleDiffusionPipeline
UpperCamelCase__ : Any =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"""negative_prompt""",
"""height""",
"""width""",
"""negative_prompt_embeds""",
}
UpperCamelCase__ : Any =PipelineTesterMixin.required_optional_params - {"""latents"""}
UpperCamelCase__ : Union[str, Any] =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""source_prompt"""} )
UpperCamelCase__ : Optional[Any] =IMAGE_TO_IMAGE_IMAGE_PARAMS
UpperCamelCase__ : Union[str, Any] =IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , num_train_timesteps=1000 , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        """simple docstring"""
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image / 2 + 0.5
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
'prompt': 'An astronaut riding an elephant',
'source_prompt': 'An astronaut riding a horse',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'eta': 0.1,
'strength': 0.8,
'guidance_scale': 3,
'source_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Tuple ='cpu' # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase : Tuple =self.get_dummy_components()
__UpperCamelCase : Optional[Any] =CycleDiffusionPipeline(**lowerCamelCase__ )
__UpperCamelCase : int =pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCamelCase : Union[str, Any] =self.get_dummy_inputs(lowerCamelCase__ )
__UpperCamelCase : Dict =pipe(**lowerCamelCase__ )
__UpperCamelCase : int =output.images
__UpperCamelCase : Optional[int] =images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__UpperCamelCase : Any =np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Tuple =self.get_dummy_components()
for name, module in components.items():
if hasattr(lowerCamelCase__ , 'half' ):
__UpperCamelCase : List[str] =module.half()
__UpperCamelCase : int =CycleDiffusionPipeline(**lowerCamelCase__ )
__UpperCamelCase : Any =pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCamelCase : int =self.get_dummy_inputs(lowerCamelCase__ )
__UpperCamelCase : Union[str, Any] =pipe(**lowerCamelCase__ )
__UpperCamelCase : Dict =output.images
__UpperCamelCase : int =images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__UpperCamelCase : Optional[int] =np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def __lowercase ( self ):
"""simple docstring"""
return super().test_save_load_local()
@unittest.skip('non-deterministic pipeline' )
def __lowercase ( self ):
"""simple docstring"""
return super().test_inference_batch_single_identical()
@skip_mps
def __lowercase ( self ):
"""simple docstring"""
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def __lowercase ( self ):
"""simple docstring"""
return super().test_save_load_optional_components()
@skip_mps
def __lowercase ( self ):
"""simple docstring"""
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : List[str] =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/cycle-diffusion/black_colored_car.png' )
__UpperCamelCase : Union[str, Any] =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy' )
__UpperCamelCase : Union[str, Any] =init_image.resize((512, 512) )
__UpperCamelCase : Dict ='CompVis/stable-diffusion-v1-4'
__UpperCamelCase : Optional[int] =DDIMScheduler.from_pretrained(lowerCamelCase__ , subfolder='scheduler' )
__UpperCamelCase : List[str] =CycleDiffusionPipeline.from_pretrained(
lowerCamelCase__ , scheduler=lowerCamelCase__ , safety_checker=lowerCamelCase__ , torch_dtype=torch.floataa , revision='fp16' )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
pipe.enable_attention_slicing()
__UpperCamelCase : str ='A black colored car'
__UpperCamelCase : Optional[int] ='A blue colored car'
__UpperCamelCase : Optional[int] =torch.manual_seed(0 )
__UpperCamelCase : Dict =pipe(
prompt=lowerCamelCase__ , source_prompt=lowerCamelCase__ , image=lowerCamelCase__ , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowerCamelCase__ , output_type='np' , )
__UpperCamelCase : str =output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Optional[int] =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/cycle-diffusion/black_colored_car.png' )
__UpperCamelCase : int =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy' )
__UpperCamelCase : Optional[int] =init_image.resize((512, 512) )
__UpperCamelCase : Optional[int] ='CompVis/stable-diffusion-v1-4'
__UpperCamelCase : Any =DDIMScheduler.from_pretrained(lowerCamelCase__ , subfolder='scheduler' )
__UpperCamelCase : Optional[Any] =CycleDiffusionPipeline.from_pretrained(lowerCamelCase__ , scheduler=lowerCamelCase__ , safety_checker=lowerCamelCase__ )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
pipe.enable_attention_slicing()
__UpperCamelCase : Union[str, Any] ='A black colored car'
__UpperCamelCase : Optional[Any] ='A blue colored car'
__UpperCamelCase : str =torch.manual_seed(0 )
__UpperCamelCase : List[Any] =pipe(
prompt=lowerCamelCase__ , source_prompt=lowerCamelCase__ , image=lowerCamelCase__ , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowerCamelCase__ , output_type='np' , )
__UpperCamelCase : Any =output.images
assert np.abs(image - expected_image ).max() < 2E-2
| 71 |
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve(matrix: Matrix , vector: Matrix ) -> Matrix:
    '''simple docstring'''
    size: int = len(matrix )
    augmented: Matrix = [[0 for _ in range(size + 1 )] for _ in range(size )]
    row: int
    rowa: int
    col: int
    cola: int
    pivot_row: int
    ratio: float
    for row in range(size ):
        for col in range(size ):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[rowa][col] ), rowa) for rowa in range(row , size ) )[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        for rowa in range(row + 1 , size ):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1 , size + 1 ):
                augmented[rowa][cola] -= augmented[row][cola] * ratio
        row += 1
        col += 1
    # back substitution
    for col in range(1 , size ):
        for row in range(col ):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col , size + 1 ):
                augmented[row][cola] -= augmented[col][cola] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(size )
    ]
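# A tiny sanity check of the eliminator (illustrative values, not from the
# original module): solve([[2.0, 0.0], [0.0, 2.0]], [[4.0], [6.0]]) returns
# [[2.0], [3.0]].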
def interpolate(y_points: list[int] ) -> Callable[[int], int]:
    '''simple docstring'''
    size: int = len(y_points )
    matrix: Matrix = [[0 for _ in range(size )] for _ in range(size )]
    vector: Matrix = [[0] for _ in range(size )]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int
    for x_val, y_val in enumerate(y_points ):
        for col in range(size ):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(matrix , vector )
    def interpolated_func(var: int ) -> int:
        return sum(
            round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
            for x_val in range(size ) )
    return interpolated_func
def question_function(variable: int ) -> int:
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def solution(func: Callable[[int], int] = question_function , order: int = 10 ) -> int:
    '''simple docstring'''
    data_points = [func(x_val ) for x_val in range(1 , order + 1 )]
    polynomials = [
        interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
    ]
    ret = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val ) == poly(x_val ):
            x_val += 1
        ret += poly(x_val )
    return ret
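# Worked example from the Project Euler 101 statement: for the cubic generator
# u(n) = n**3 the first incorrect terms of the optimum polynomials are 1, 15
# and 58, so solution(lambda n: n**3, 3) returns 74.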
if __name__ == "__main__":
print(f"""{solution() = }""")
| 339 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        image_processor_map = {
'''do_resize''': True,
'''size''': 2_0,
'''do_center_crop''': True,
'''crop_size''': 1_8,
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(image_processor_map , fp )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , **__lowerCAmelCase : int ):
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] , **__lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple , **__lowerCAmelCase : Optional[int] ):
"""simple docstring"""
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
        image_inputs = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : int = self.get_tokenizer()
_lowerCamelCase : List[Any] = self.get_rust_tokenizer()
_lowerCamelCase : Tuple = self.get_image_processor()
_lowerCamelCase : List[Any] = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
_lowerCamelCase : str = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCAmelCase )
_lowerCamelCase : str = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
_lowerCamelCase : List[Any] = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __lowerCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , __lowerCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __lowerCAmelCase )
self.assertIsInstance(processor_fast.image_processor , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Tuple = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_lowerCamelCase : Any = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_lowerCamelCase : Tuple = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 )
_lowerCamelCase : int = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Dict = self.get_image_processor()
_lowerCamelCase : str = self.get_tokenizer()
_lowerCamelCase : Optional[int] = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
_lowerCamelCase : Tuple = self.prepare_image_inputs()
_lowerCamelCase : Optional[Any] = image_processor(__lowerCAmelCase , return_tensors='''np''' )
_lowerCamelCase : Tuple = processor(images=__lowerCAmelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.get_image_processor()
_lowerCamelCase : List[Any] = self.get_tokenizer()
_lowerCamelCase : List[str] = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
_lowerCamelCase : List[Any] = '''lower newer'''
_lowerCamelCase : Tuple = processor(text=__lowerCAmelCase )
_lowerCamelCase : str = tokenizer(__lowerCAmelCase , padding='''max_length''' , max_length=6_4 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : int = self.get_image_processor()
_lowerCamelCase : Optional[Any] = self.get_tokenizer()
_lowerCamelCase : str = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = '''lower newer'''
_lowerCamelCase : int = self.prepare_image_inputs()
_lowerCamelCase : int = processor(text=__lowerCAmelCase , images=__lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(__lowerCAmelCase ):
processor()
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : int = self.get_image_processor()
_lowerCamelCase : int = self.get_tokenizer()
_lowerCamelCase : Any = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
_lowerCamelCase : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCamelCase : Optional[int] = processor.batch_decode(__lowerCAmelCase )
_lowerCamelCase : List[Any] = tokenizer.batch_decode(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = self.get_image_processor()
_lowerCamelCase : Optional[Any] = self.get_tokenizer()
_lowerCamelCase : Optional[Any] = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = '''lower newer'''
_lowerCamelCase : Tuple = self.prepare_image_inputs()
_lowerCamelCase : Union[str, Any] = processor(text=__lowerCAmelCase , images=__lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 72 |
from __future__ import annotations
def A ( _UpperCAmelCase : list[int] ) -> bool:
'''simple docstring'''
return len(set(_UpperCAmelCase ) ) == len(_UpperCAmelCase )
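# Behaviour sketch: set() collapses duplicates, so the check passes for
# [1, 2, 3] (True) and fails for [1, 2, 2] (False).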
if __name__ == "__main__":
import doctest
doctest.testmod()
| 339 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
a =logging.get_logger(__name__)
class A_ ( BaseImageProcessor ):
    model_input_names = ['''pixel_values''']
    def __init__( self ,do_resize : bool = True ,size : Dict[str, int] = None ,crop_pct : float = None ,resample : PILImageResampling = PILImageResampling.BILINEAR ,do_rescale : bool = True ,rescale_factor : Union[int, float] = 1 / 2_5_5 ,do_normalize : bool = True ,image_mean : Optional[Union[float, List[float]]] = None ,image_std : Optional[Union[float, List[float]]] = None ,**kwargs ):
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 3_8_4}
        size = get_size_dict(size ,default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self ,image : np.ndarray ,size : Dict[str, int] ,crop_pct : float ,resample : PILImageResampling = PILImageResampling.BICUBIC ,data_format : Optional[Union[str, ChannelDimension]] = None ,**kwargs ):
        size = get_size_dict(size ,default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(F"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size['shortest_edge']
        if shortest_edge < 3_8_4:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image ,size=resize_shortest_edge ,default_to_square=False)
            image = resize(image=image ,size=resize_size ,resample=resample ,data_format=data_format ,**kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image ,size=(shortest_edge, shortest_edge) ,data_format=data_format ,**kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image ,size=(shortest_edge, shortest_edge) ,resample=resample ,data_format=data_format ,**kwargs)
    def rescale( self ,image : np.ndarray ,scale : Union[int, float] ,data_format : Optional[Union[str, ChannelDimension]] = None ,**kwargs ):
        return rescale(image ,scale=scale ,data_format=data_format ,**kwargs)
    def normalize( self ,image : np.ndarray ,mean : Union[float, List[float]] ,std : Union[float, List[float]] ,data_format : Optional[Union[str, ChannelDimension]] = None ,**kwargs ):
        return normalize(image ,mean=mean ,std=std ,data_format=data_format ,**kwargs)
    def preprocess( self ,images : ImageInput ,do_resize : bool = None ,size : Dict[str, int] = None ,crop_pct : float = None ,resample : PILImageResampling = None ,do_rescale : bool = None ,rescale_factor : float = None ,do_normalize : bool = None ,image_mean : Optional[Union[float, List[float]]] = None ,image_std : Optional[Union[float, List[float]]] = None ,return_tensors : Optional[Union[str, TensorType]] = None ,data_format : ChannelDimension = ChannelDimension.FIRST ,**kwargs ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size ,default_to_square=False)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and (size is None or resample is None):
            raise ValueError('Size and resample must be specified if do_resize is True.')
        if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None:
            raise ValueError('crop_pct must be specified if size < 384.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image ,size=size ,crop_pct=crop_pct ,resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image ,scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image ,mean=image_mean ,std=image_std) for image in images]
        images = [to_channel_dimension_format(image ,data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data ,tensor_type=return_tensors)
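# A minimal usage sketch (assumes an RGB PIL image bound to `image`; the
# values below are illustrative, not part of the class):
#   processor = A_(size={'shortest_edge': 2_2_4})
#   batch = processor(images=image, return_tensors='np')
#   batch['pixel_values'].shape  # -> (1, 3, 224, 224) after resize + center crop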
| 73 |
import os
UpperCAmelCase__ = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
def parse_roman_numerals(numerals: str ) -> int:
    '''simple docstring'''
    total_value = 0
    index = 0
    while index < len(numerals ) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals(num: int ) -> str:
    '''simple docstring'''
    numerals = ''
    m_count = num // 1_000
    numerals += m_count * "M"
    num %= 1_000
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals
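# A round-trip sketch (an arbitrary non-minimal numeral chosen for
# illustration): parse_roman_numerals("XXXXVIIII") gives 49, and
# generate_roman_numerals(49) re-encodes it minimally as "XLIX",
# a saving of 5 characters.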
def A ( _UpperCAmelCase : str = "/p089_roman.txt" ) -> int:
'''simple docstring'''
_UpperCAmelCase = 0
with open(os.path.dirname(_UpperCAmelCase ) + roman_numerals_filename ) as filea:
_UpperCAmelCase = filea.readlines()
for line in lines:
_UpperCAmelCase = line.strip()
_UpperCAmelCase = parse_roman_numerals(_UpperCAmelCase )
_UpperCAmelCase = generate_roman_numerals(_UpperCAmelCase )
savings += len(_UpperCAmelCase ) - len(_UpperCAmelCase )
return savings
if __name__ == "__main__":
print(f"""{solution() = }""")
| 339 | 0 |
"""simple docstring"""
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card( model_card_dir , src_lang , tgt_lang ):
    texts = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, oder?',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
'ru-en': ['[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)', '39.20'],
'en-ru': ['[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)', '33.47'],
'en-de': ['[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)', '42.83'],
'de-en': ['[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)', '41.35'],
}
    pair = F'{src_lang}-{tgt_lang}'
A = F'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'
    os.makedirs(model_card_dir , exist_ok=True )
    path = os.path.join(model_card_dir , 'README.md' )
    print(F'Generating {path}' )
    with open(path , 'w' , encoding='utf-8' ) as f:
        f.write(A )  # A still holds the rendered model card text assigned above
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / '''model_cards'''
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split('''-''')
    model_card_dir = model_cards_dir / '''facebook''' / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 74 |
import requests
from bsa import BeautifulSoup
def get_citation(base_url: str , params: dict ) -> str:
    '''simple docstring'''
    soup = BeautifulSoup(requests.get(base_url , params=params ).content , 'html.parser' )
    div = soup.find('div' , attrs={'class': 'gs_ri'} )
    anchors = div.find('div' , attrs={'class': 'gs_fl'} ).find_all('a' )
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 339 | 0 |
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path( pred_path , tgt_path , save_path=None , **kwargs ):
    """simple docstring"""
    pred_lns = [x.strip() for x in open(pred_path ).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path ).readlines()][: len(pred_lns )]
    metrics = calculate_rouge(pred_lns , tgt_lns , **kwargs )
    if save_path is not None:
        save_json(metrics , save_path , indent=None )
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
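# Example invocation via python-fire (the script and file names below are
# assumed, not fixed by this module):
#   python rouge_cli.py predictions.txt references.txt --save_path rouge.json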
| 75 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_choices=4 , ) -> Optional[int]:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self) -> List[Any]:
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        config = RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self) -> List[str]:
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
@require_flax
class __lowerCAmelCase ( FlaxModelTesterMixin , unittest.TestCase ):
    test_head_masking = True
    all_model_classes = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self) -> Any:
        """simple docstring"""
        self.model_tester = FlaxRoFormerModelTester(self)
@slow
    def test_model_from_pretrained( self) -> Dict:
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('junnyu/roformer_chinese_small' , from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
@slow
    def test_inference_masked_lm( self) -> Optional[Any]:
        """simple docstring"""
        model = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base')
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        vocab_size = 5_00_00
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape , expected_shape)
        expected_slice = jnp.array(
            [[[-0.1_2_0_5, -1.0_2_6_5, 0.2_9_2_2], [-1.5_1_3_4, 0.1_9_7_4, 0.1_5_1_9], [-5.0_1_3_5, -3.9_0_0_3, -0.8_4_0_4]]])
        self.assertTrue(jnp.allclose(output[:, :3, :3] , expected_slice , atol=1E-4))
| 339 | 0 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
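# Typical command line (flags come from TensorFlowBenchmarkArguments; the
# script and model names are illustrative):
#   python run_benchmark_tf.py --models bert-base-cased --batch_sizes 8 --sequence_lengths 128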
if __name__ == "__main__":
    main()
| 76 |
cache = {}
def _calculate(days: int , absent: int , late: int ) -> int:
    '''simple docstring'''
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1 , absent , late + 1 )
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1 , absent + 1 , 0 )
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1 , absent , 0 )
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings
def solution(days: int = 30 ) -> int:
    '''simple docstring'''
    return _calculate(days , absent=0 , late=0 )
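# Sanity check from the Project Euler 191 statement: of the 3**4 = 81 trinary
# strings for a 4-day period, exactly 43 earn a prize, so solution(4) == 43.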
if __name__ == "__main__":
print(solution())
| 339 | 0 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class UpperCAmelCase_ ( TaskTemplate):
lowerCamelCase__ : str = field(default="language-modeling" , metadata={"include_in_asdict_even_if_is_default": True})
lowerCamelCase__ : ClassVar[Features] = Features({"text": Value("string")})
lowerCamelCase__ : ClassVar[Features] = Features({})
lowerCamelCase__ : str = "text"
@property
    def column_mapping( self ) -> Dict[str, str]:
return {self.text_column: "text"}
| 77 |
import os
import sys
import unittest
UpperCAmelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
UpperCAmelCase__ = os.path.join(git_repo_path, "src", "diffusers")
class __lowerCAmelCase ( unittest.TestCase ):
    def test_find_backend( self) -> Optional[Any]:
"""simple docstring"""
        simple_backend = find_backend(' if not is_torch_available():')
        self.assertEqual(simple_backend , 'torch')
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
        double_backend = find_backend(' if not (is_torch_available() and is_transformers_available()):')
        self.assertEqual(double_backend , 'torch_and_transformers')
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        triple_backend = find_backend(
            ' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):')
        self.assertEqual(triple_backend , 'torch_and_transformers_and_onnx')
    def test_read_init( self) -> Dict:
"""simple docstring"""
        objects = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn('torch' , objects)
        self.assertIn('torch_and_transformers' , objects)
        self.assertIn('flax_and_transformers' , objects)
        self.assertIn('torch_and_transformers_and_onnx' , objects)
# Likewise, we can't assert on the exact content of a key
self.assertIn('UNet2DModel' , objects['torch'])
self.assertIn('FlaxUNet2DConditionModel' , objects['flax'])
self.assertIn('StableDiffusionPipeline' , objects['torch_and_transformers'])
self.assertIn('FlaxStableDiffusionPipeline' , objects['flax_and_transformers'])
self.assertIn('LMSDiscreteScheduler' , objects['torch_and_scipy'])
self.assertIn('OnnxStableDiffusionPipeline' , objects['torch_and_transformers_and_onnx'])
    def test_create_dummy_object( self) -> List[Any]:
"""simple docstring"""
        dummy_constant = create_dummy_object('CONSTANT' , '\'torch\'')
        self.assertEqual(dummy_constant , '\nCONSTANT = None\n')
        dummy_function = create_dummy_object('function' , '\'torch\'')
        self.assertEqual(
            dummy_function , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n')
        expected_dummy_class = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n'
        dummy_class = create_dummy_object('FakeClass' , '\'torch\'')
        self.assertEqual(dummy_class , expected_dummy_class)
    def test_create_dummy_files( self) -> int:
"""simple docstring"""
        expected_dummy_pytorch_file = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n'
        dummy_files = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']})
        self.assertEqual(dummy_files['torch'] , expected_dummy_pytorch_file)
| 339 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    """Polynomial with real coefficients, stored from lowest to highest degree."""

    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += self.coefficients[i] * polynomial_2.coefficients[j]
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False
        if self.degree != polynomial_2.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
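# A minimal usage sketch of the class above (the coefficient lists and printed
# forms are illustrative, not from the original file): build 2x + 1 and 3x^2,
# then exercise the arithmetic and calculus helpers.
if __name__ == "__main__":
    p = Polynomial(1, [1, 2])      # 2x + 1
    q = Polynomial(2, [0, 0, 3])   # 3x^2
    print(p + q)                   # 3x^2 + 2x + 1
    print(p * q)                   # 6x^3 + 3x^2
    print(p.derivative())          # 2
    print(q.integral())            # 1.0x^3
    print(p.evaluate(2))           # 2*2 + 1 = 5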
| 78 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def main():
'''simple docstring'''
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
else:
raise ValueError('Need either a GLUE task or a test file for `do_predict`.' )
for key in data_files.keys():
logger.info(F"load a local file for {key}: {data_files[key]}" )
if data_args.train_file.endswith('.csv' ):
# Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        add_prefix_space=True,
    )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
# Padding strategy
if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {"Refused": 0, "Entailed": 1}
    model.config.id2label = {0: "Refused", 1: "Entailed"}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)
        result["label"] = examples["label"]
        return result
with training_args.main_process_first(desc='dataset map pre-processing' ):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on dataset",
        )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('--do_predict requires a test dataset' )
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
# Log a few random samples from the training set:
if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
logger.info(F"Sample {index} of the training set: {train_dataset[index]}." )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None
# Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
if training_args.do_predict:
logger.info('*** Predict ***' )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)

        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
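# Standalone sketch of the `_convert_table_text_to_pandas` helper above, with a
# made-up TabFact-style table string ("#"-separated cells, newline-separated
# rows) showing the expected round trip into a pandas DataFrame:
def _demo_convert_table_text_to_pandas(table_text: str) -> pd.DataFrame:
    rows = [row.split("#") for row in table_text.strip("\n").split("\n")]
    return pd.DataFrame.from_records(rows[1:], columns=rows[0])


if __name__ == "__main__":
    demo_table = "player#team#goals\nmessi#psg#30\nhaaland#city#36"
    print(_demo_convert_table_text_to_pandas(demo_table))
    #     player  team goals
    # 0    messi   psg    30
    # 1  haaland  city    36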
| 339 | 0 |
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Finds the root of `func` (an expression in x) from starting point `a` by Newton's method."""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
print(F"""The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}""")
    # Find root of log(x) = 1 (the root is e)
print(F"""The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}""")
# Exponential Roots
print(F"""The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}""")
| 79 |
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program, timeout, task_id, completion_id):
    """Runs `check_program` in a subprocess and reports whether it passed within `timeout`."""
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append("timed out")

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
def unsafe_execute(check_program, result, timeout):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)
@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield
@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname
class TimeoutException(Exception):
    pass


class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it's read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns True if the IO object can be read."""
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"
@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)
def reliability_guard(maximum_memory_bytes=None):
    """
    Disables destructive functions so the executed program cannot interfere
    with the test (e.g. fork bombs, killing other processes, removing files).
    WARNING: this is not a security sandbox; untrusted code should still be
    run in an isolated environment.
    """
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.fchdir = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    __builtins__["help"] = None

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
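# A minimal sketch of how the harness above is typically driven (the sample
# program and ids below are made up): the check program bundles a candidate
# completion with its asserts, and the returned dict reports pass/fail.
if __name__ == "__main__":
    sample_program = (
        "def add(a, b):\n"
        "    return a + b\n"
        "assert add(2, 3) == 5\n"
    )
    print(check_correctness(sample_program, timeout=3.0, task_id="demo/0", completion_id=0))
    # -> {'task_id': 'demo/0', 'passed': True, 'result': 'passed', 'completion_id': 0}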
| 339 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
        "MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MBartForCausalLM",
        "MBartForConditionalGeneration",
        "MBartForQuestionAnswering",
        "MBartForSequenceClassification",
        "MBartModel",
        "MBartPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
        "TFMBartForConditionalGeneration",
        "TFMBartModel",
        "TFMBartPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
        "FlaxMBartForConditionalGeneration",
        "FlaxMBartForQuestionAnswering",
        "FlaxMBartForSequenceClassification",
        "FlaxMBartModel",
        "FlaxMBartPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
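# Sketch of the lazy-import pattern the _LazyModule above implements, reduced
# to plain PEP 562 module-level __getattr__ (the mapping and names below are
# illustrative, not part of this file's real logic): heavy submodules are only
# imported on first attribute access.
#
# import importlib
#
# _LAZY_ATTRS = {"MBartModel": ".modeling_mbart"}
#
# def __getattr__(name):
#     if name in _LAZY_ATTRS:
#         module = importlib.import_module(_LAZY_ATTRS[name], __package__)
#         return getattr(module, name)
#     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")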
| 80 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    "Decorator that skips a test unconditionally"
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    "Decorator marking a test as slow; skipped unless RUN_SLOW=1 is set"
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
class TempDirTestCase(unittest.TestCase):
    "A TestCase that keeps one temporary directory for the whole class and wipes its contents on setUp."

    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        "Creates a `tempfile.mkdtemp` directory for the class"
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        "Remove `cls.tmpdir` after the test suite has finished"
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        "Destroy all contents in `self.tmpdir`, but not `self.tmpdir` itself"
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    "A TestCase that resets the accelerator state singletons after every test."

    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    "A TestCase for dynamically registering mocks that are active in all test functions."

    def add_mocks(self, mocks: Union[mock.Mock, List[mock.Mock]]):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    return result
class SubprocessCallException(Exception):
    pass
def run_command(command, return_stdout=False):
    """Runs `command` with `subprocess.check_output`, optionally returning decoded stdout, and wraps failures."""
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
| 339 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""xlm-roberta-base""": 5_1_2,
"""xlm-roberta-large""": 5_1_2,
"""xlm-roberta-large-finetuned-conll02-dutch""": 5_1_2,
"""xlm-roberta-large-finetuned-conll02-spanish""": 5_1_2,
"""xlm-roberta-large-finetuned-conll03-english""": 5_1_2,
"""xlm-roberta-large-finetuned-conll03-german""": 5_1_2,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    """
    Adapted from RobertaTokenizer and XLNetTokenizer. Based on SentencePiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 81 |
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """
    Checks whether digit `n` can be placed at (row, column): it must not
    already appear in that row, that column, or the enclosing 3x3 box.
    """
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """
    Finds an empty location (marked 0) so that we can assign a number
    for that particular row and column.
    """
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """
    Takes a partially filled-in grid and attempts to assign values to all
    unassigned locations, backtracking whenever no digit is safe.
    """
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None


def print_solution(grid: Matrix) -> None:
    """Prints the solution in the form of a 9x9 grid."""
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
| 339 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
assert np.abs((expected_image - image).max() ) < 1e-1
| 82 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
UpperCAmelCase__ = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
UpperCAmelCase__ = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
UpperCAmelCase__ = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]
        return {"meteor": np.mean(scores)}
| 339 | 0 |
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)

Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(lowercase )
class lowercase__ ( lowercase ):
def __init__( self : Any ,*lowerCamelCase__ : List[Any] ,**lowerCamelCase__ : Optional[int] ):
'''simple docstring'''
super().__init__(*lowerCamelCase__ ,**lowerCamelCase__ )
if self.framework == "tf":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
requires_backends(self ,'vision' )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def UpperCamelCase_ ( self : str ,**lowerCamelCase__ : List[Any] ):
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = {}
if "threshold" in kwargs:
_UpperCamelCase : Optional[Any] = kwargs['threshold']
return {}, {}, postprocess_kwargs
def __call__( self : Optional[Any] ,*lowerCamelCase__ : Optional[Any] ,**lowerCamelCase__ : List[str] ):
'''simple docstring'''
return super().__call__(*lowerCamelCase__ ,**lowerCamelCase__ )
def UpperCamelCase_ ( self : Optional[int] ,lowerCamelCase__ : int ):
'''simple docstring'''
_UpperCamelCase : Optional[int] = load_image(lowerCamelCase__ )
_UpperCamelCase : List[Any] = torch.IntTensor([[image.height, image.width]] )
_UpperCamelCase : int = self.image_processor(images=[image] ,return_tensors='pt' )
if self.tokenizer is not None:
_UpperCamelCase : int = self.tokenizer(text=inputs['words'] ,boxes=inputs['boxes'] ,return_tensors='pt' )
_UpperCamelCase : List[Any] = target_size
return inputs
def UpperCamelCase_ ( self : List[Any] ,lowerCamelCase__ : str ):
'''simple docstring'''
_UpperCamelCase : Optional[int] = model_inputs.pop('target_size' )
_UpperCamelCase : Optional[int] = self.model(**lowerCamelCase__ )
_UpperCamelCase : Any = outputs.__class__({'target_size': target_size, **outputs} )
if self.tokenizer is not None:
_UpperCamelCase : Dict = model_inputs['bbox']
return model_outputs
    def postprocess(self, model_outputs, threshold=0.9):
        '''simple docstring'''
        target_size = model_outputs['target_size']
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]))

            scores, classes = model_outputs['logits'].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs['bbox'].squeeze(0)]
            keys = ['score', 'label', 'box']
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation['scores']
            labels = raw_annotation['labels']
            boxes = raw_annotation['boxes']
            raw_annotation['scores'] = scores.tolist()
            raw_annotation['labels'] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation['boxes'] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ['score', 'label', 'box']
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation['scores'], raw_annotation['labels'], raw_annotation['boxes'])
            ]
        return annotation
def UpperCamelCase_ ( self : List[str] ,lowerCamelCase__ : "torch.Tensor" ):
'''simple docstring'''
if self.framework != "pt":
raise ValueError('The ObjectDetectionPipeline is only available in PyTorch.' )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase : List[str] = box.int().tolist()
_UpperCamelCase : List[Any] = {
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax,
}
return bbox
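# A minimal usage sketch (not part of the original module), assuming the standard
# `pipeline` factory and a public detection checkpoint such as "facebook/detr-resnet-50":
#
#     from transformers import pipeline
#
#     detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#     predictions = detector("http://images.cocodataset.org/val2017/000000039769.jpg")
#     # -> [{"score": 0.99, "label": "cat",
#     #      "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]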
| 83 |
import argparse
import hashlib
import io
import os
import urllib.request
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
"tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
"tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
"base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
"base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
"small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
"small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
"medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
"medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
"large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
"large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}
def remove_ignore_keys_(state_dict):
    '''simple docstring'''
    ignore_keys = ['layers', 'blocks']
    for k in ignore_keys:
        state_dict.pop(k, None)
WHISPER_MAPPING = {
"blocks": "layers",
"mlp.0": "fc1",
"mlp.2": "fc2",
"mlp_ln": "final_layer_norm",
".attn.query": ".self_attn.q_proj",
".attn.key": ".self_attn.k_proj",
".attn.value": ".self_attn.v_proj",
".attn_ln": ".self_attn_layer_norm",
".attn.out": ".self_attn.out_proj",
".cross_attn.query": ".encoder_attn.q_proj",
".cross_attn.key": ".encoder_attn.k_proj",
".cross_attn.value": ".encoder_attn.v_proj",
".cross_attn_ln": ".encoder_attn_layer_norm",
".cross_attn.out": ".encoder_attn.out_proj",
"decoder.ln.": "decoder.layer_norm.",
"encoder.ln.": "encoder.layer_norm.",
"token_embedding": "embed_tokens",
"encoder.positional_embedding": "encoder.embed_positions.weight",
"decoder.positional_embedding": "decoder.embed_positions.weight",
"ln_post": "layer_norm",
}
def rename_keys(s_dict):
    '''simple docstring'''
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)
        print(F"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)
    return s_dict


def make_linear_from_emb(emb):
    '''simple docstring'''
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url: str, root: str = ".") -> bytes:
    '''simple docstring'''
    # NOTE: the caller below passes only the URL, so `root` defaults to the current
    # directory here (an assumption; a cache directory could be passed instead).
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split('/')[-2]
    download_target = os.path.join(root, filename)
    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(F"{download_target} exists and is not a regular file")
    if os.path.isfile(download_target):
        model_bytes = open(download_target, 'rb').read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(F"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
    with urllib.request.urlopen(url) as source, open(download_target, 'wb') as output:
        with tqdm(
            total=int(source.info().get('Content-Length')), ncols=80, unit='iB', unit_scale=True, unit_divisor=1_024) as loop:
            while True:
                buffer = source.read(8_192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))
    model_bytes = open(download_target, 'rb').read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            'Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.')
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    '''simple docstring'''
    if ".pt" not in checkpoint_path:
        # _download returns raw bytes, so deserialize them here.
        original_checkpoint = torch.load(io.BytesIO(_download(_MODELS[checkpoint_path])), map_location='cpu')
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location='cpu')
    dimensions = original_checkpoint['dims']
    state_dict = original_checkpoint['model_state_dict']
    proj_out_weights = state_dict['decoder.token_embedding.weight']
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict['decoder.layers.0.fc1.weight'].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions['n_vocab'],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions['n_mels'],
        d_model=dimensions['n_audio_state'],
        max_target_positions=dimensions['n_text_ctx'],
        encoder_layers=dimensions['n_audio_layer'],
        encoder_attention_heads=dimensions['n_audio_head'],
        decoder_layers=dimensions['n_text_layer'],
        decoder_attention_heads=dimensions['n_text_head'],  # was 'n_text_state', which is the hidden size, not the head count
        max_source_positions=dimensions['n_audio_ctx'],
    )
    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
            F" but all the following weights are missing {missing}")
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to the downloaded checkpoints")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
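# Example invocation (a sketch with hypothetical paths; the checkpoint name must be a
# key of _MODELS or a path to an already-downloaded .pt file, and the script filename
# is assumed):
#
#     python convert_openai_whisper_to_hf.py --checkpoint_path tiny.en \
#         --pytorch_dump_folder_path ./whisper-tiny-en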
| 339 | 0 |
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import Array2D, ClassLabel, Features, Image, Value
from datasets.features.features import Array2DExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class TypedSequenceTest(TestCase):
    def test_no_type(self) -> None:
        arr = pa.array(TypedSequence([1, 2, 3]))
        self.assertEqual(arr.type, pa.int64())

    def test_array_type_forbidden(self) -> None:
        with self.assertRaises(ValueError):
            arr = pa.array(TypedSequence([1, 2, 3]), type=pa.int64())

    def test_try_type_and_type_forbidden(self) -> None:
        with self.assertRaises(ValueError):
            arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("bool"), type=Value("int64")))

    def test_compatible_type(self) -> None:
        arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_type(self) -> None:
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            arr = pa.array(TypedSequence(["foo", "bar"], type=Value("int64")))

    def test_try_compatible_type(self) -> None:
        arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_try_incompatible_type(self) -> None:
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int64")))
        self.assertEqual(arr.type, pa.string())

    def test_compatible_extension_type(self) -> None:
        arr = pa.array(TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_incompatible_extension_type(self) -> None:
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            arr = pa.array(TypedSequence(["foo", "bar"], type=Array2D((1, 3), "int64")))

    def test_try_compatible_extension_type(self) -> None:
        arr = pa.array(TypedSequence([[[1, 2, 3]]], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_try_incompatible_extension_type(self) -> None:
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, pa.string())

    @require_pil
    def test_exhaustive_cast(self) -> None:
        import PIL.Image

        pil_image = PIL.Image.fromarray(np.arange(10, dtype=np.uint8).reshape(2, 5))
        with patch(
            "datasets.arrow_writer.cast_to_python_objects", side_effect=cast_to_python_objects) as mock_cast_to_python_objects:
            _ = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image], type=Image()))
            args, kwargs = mock_cast_to_python_objects.call_args_list[-1]
            self.assertIn("optimize_list_casting", kwargs)
            self.assertFalse(kwargs["optimize_list_casting"])
def _check_output(output, expected_num_chunks: int):
    '''simple docstring'''
    stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    assert len(pa_table.to_batches()) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}])
def test_write(fields, writer_batch_size):
    '''simple docstring'''
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
def test_write_with_features():
    '''simple docstring'''
    output = pa.BufferOutputStream()
    features = Features({"labels": ClassLabel(names=["neg", "pos"])})
    with ArrowWriter(stream=output, features=features) as writer:
        writer.write({"labels": 0})
        writer.write({"labels": 1})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    stream = pa.BufferReader(output.getvalue())
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    schema = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    assert features == Features.from_arrow_schema(schema)
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
def _snake_case ( lowercase__ : Any ) -> str:
'''simple docstring'''
lowerCAmelCase_ :str = pa.BufferOutputStream()
with ArrowWriter(
stream=lowercase__ , writer_batch_size=lowercase__ , hash_salt="""split_name""" , check_duplicates=lowercase__ , ) as writer:
with pytest.raises(lowercase__ ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=[1, 2] )
lowerCAmelCase_ , lowerCAmelCase_ :Dict = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 1_0] )
def _snake_case ( lowercase__ : Dict ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ :List[str] = pa.BufferOutputStream()
with ArrowWriter(
stream=lowercase__ , writer_batch_size=lowercase__ , hash_salt="""split_name""" , check_duplicates=lowercase__ , ) as writer:
with pytest.raises(lowercase__ ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1_0 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=1_0 )
lowerCAmelCase_ , lowerCAmelCase_ :int = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 1_0] )
def _snake_case ( lowercase__ : Tuple ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ :Union[str, Any] = pa.BufferOutputStream()
with ArrowWriter(
stream=lowercase__ , writer_batch_size=lowercase__ , hash_salt="""split_name""" , check_duplicates=lowercase__ , ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=2 )
lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _snake_case ( lowercase__ : List[str] , lowercase__ : List[Any] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ :Dict = pa.BufferOutputStream()
lowerCAmelCase_ :str = pa.schema(lowercase__ ) if fields else None
with ArrowWriter(stream=lowercase__ , schema=lowercase__ , writer_batch_size=lowercase__ ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
writer.write_batch({"""col_1""": [], """col_2""": []} )
lowerCAmelCase_ , lowerCAmelCase_ :int = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
lowerCAmelCase_ :Optional[Any] = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(lowercase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _snake_case ( lowercase__ : str , lowercase__ : Optional[Any] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ :List[Any] = pa.BufferOutputStream()
lowerCAmelCase_ :Optional[Any] = pa.schema(lowercase__ ) if fields else None
with ArrowWriter(stream=lowercase__ , schema=lowercase__ , writer_batch_size=lowercase__ ) as writer:
writer.write_table(pa.Table.from_pydict({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) )
lowerCAmelCase_ , lowerCAmelCase_ :int = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
lowerCAmelCase_ :Dict = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(lowercase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _snake_case ( lowercase__ : Optional[int] , lowercase__ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ :int = pa.BufferOutputStream()
lowerCAmelCase_ :Dict = pa.schema(lowercase__ ) if fields else None
with ArrowWriter(stream=lowercase__ , schema=lowercase__ , writer_batch_size=lowercase__ ) as writer:
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""foo"""], """col_2""": [1]} ) )
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""bar"""], """col_2""": [2]} ) )
lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
lowerCAmelCase_ :str = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(lowercase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def test_write_file():
    '''simple docstring'''
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
        output = os.path.join(tmp_dir, "test.arrow")
        with ArrowWriter(path=output, schema=pa.schema(fields)) as writer:
            writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
            num_examples, num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
        _check_output(output, 1)
def get_base_dtype(arr_type):
    '''simple docstring'''
    if pa.types.is_list(arr_type):
        return get_base_dtype(arr_type.value_type)
    else:
        return arr_type


def change_first_primitive_element_in_list(lst, value):
    '''simple docstring'''
    if isinstance(lst[0], list):
        change_first_primitive_element_in_list(lst[0], value)
    else:
        lst[0] = value
@pytest.mark.parametrize("""optimized_int_type, expected_dtype""" , [(None, pa.intaa()), (Value("""int32""" ), pa.intaa())] )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _snake_case ( lowercase__ : Dict , lowercase__ : Tuple , lowercase__ : Dict ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ :Optional[Any] = pa.array(TypedSequence(lowercase__ , optimized_int_type=lowercase__ ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
    "col, expected_dtype", [
        ("attention_mask", pa.int8()),
        ("special_tokens_mask", pa.int8()),
        ("token_type_ids", pa.int8()),
        ("input_ids", pa.int32()),
        ("other", pa.int64()),
    ], )
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_typed_sequence(sequence, col, expected_dtype):
    '''simple docstring'''
    arr = pa.array(OptimizedTypedSequence(sequence, col=col))
    assert get_base_dtype(arr.type) == expected_dtype
    # not in range
    if col != "other":
        # avoids errors due to in-place modifications
        sequence = copy.deepcopy(sequence)
        value = np.iinfo(expected_dtype.to_pandas_dtype()).max + 1
        change_first_primitive_element_in_list(sequence, value)
        arr = pa.array(OptimizedTypedSequence(sequence, col=col))
        assert get_base_dtype(arr.type) == pa.int64()
@pytest.mark.parametrize("""raise_exception""" , [False, True] )
def _snake_case ( lowercase__ : int , lowercase__ : Tuple ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ :Union[str, Any] = str(tmp_path / """dataset-train.arrow""" )
try:
with ArrowWriter(path=lowercase__ ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def test_arrow_writer_with_filesystem(mockfs):
    '''simple docstring'''
    path = "mock://dataset-train.arrow"
    with ArrowWriter(path=path, storage_options=mockfs.storage_options) as writer:
        assert isinstance(writer._fs, type(mockfs))
        assert writer._fs.storage_options == mockfs.storage_options
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert mockfs.exists(path)
def test_parquet_writer_write():
    '''simple docstring'''
    output = pa.BufferOutputStream()
    with ParquetWriter(stream=output) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files", [False, True])
def test_writer_embed_local_files(tmp_path, embed_local_files):
    '''simple docstring'''
    import PIL.Image

    image_path = str(tmp_path / "test_image_rgb.jpg")
    PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uint8)).save(image_path, format="png")
    output = pa.BufferOutputStream()
    with ParquetWriter(
        stream=output, features=Features({"image": Image()}), embed_local_files=embed_local_files) as writer:
        writer.write({"image": image_path})
        writer.finalize()
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    out = pa_table.to_pydict()
    if embed_local_files:
        assert isinstance(out["image"][0]["path"], str)
        with open(image_path, "rb") as f:
            assert out["image"][0]["bytes"] == f.read()
    else:
        assert out["image"][0]["path"] == image_path
        assert out["image"][0]["bytes"] is None
def test_always_nullable():
    '''simple docstring'''
    non_nullable_schema = pa.schema([pa.field("col_1", pa.string(), nullable=False)])
    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output) as writer:
        writer._build_writer(inferred_schema=non_nullable_schema)
    assert writer._schema == pa.schema([pa.field("col_1", pa.string())])
| 84 |
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """Builder Config for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = 'audio'
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column='audio', label_column='label')
AUDIO_EXTENSIONS = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
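# A minimal usage sketch (not part of the original module), assuming a local directory
# laid out as data_dir/<label>/<clip>.wav (the path below is hypothetical):
#
#     from datasets import load_dataset
#
#     dataset = load_dataset("audiofolder", data_dir="/path/to/audio")
#     print(dataset["train"][0]["audio"], dataset["train"][0]["label"])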
| 339 | 0 |
'''simple docstring'''
from math import factorial
def solution(num: int = 100) -> int:
    '''simple docstring'''
    return sum(map(int, str(factorial(num))))
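# For example, solution(10) == 27, since 10! = 3628800 and 3+6+2+8+8+0+0 == 27.
# With the default of 100 this is Project Euler problem 20, whose answer is 648.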
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 85 |
import sys
from collections import defaultdict
class Heap:
    def __init__(self) -> None:
        """simple docstring"""
        self.node_position = []

    def get_position(self, vertex):
        """simple docstring"""
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        """simple docstring"""
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        """simple docstring"""
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1
                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start]))
                self.set_position(positions[start], temp)
                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        """simple docstring"""
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        """simple docstring"""
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        """simple docstring"""
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    '''simple docstring'''
    heap = Heap()
    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)
    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
UpperCAmelCase__ = int(input("Enter number of edges: ").strip())
UpperCAmelCase__ = defaultdict(list)
for _ in range(edges_number):
UpperCAmelCase__ = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
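# Tiny worked example (a sketch of an interactive session): for a triangle graph with
# edges 0-1 (weight 1), 1-2 (weight 2) and 0-2 (weight 3), entering 3 edges as
# "0 1 1", "1 2 2", "0 2 3" should yield the MST edges [(0, 1), (1, 2)].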
| 339 | 0 |
"""simple docstring"""
def sum_digits(num: int) -> int:
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)
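# Known result (assuming the standard Project Euler 65 statement): the digit sum of
# the numerator of the 100th convergent of the continued fraction for e is 272,
# so solution() == 272.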
if __name__ == "__main__":
print(f'{solution() = }') | 86 |
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    '''simple docstring'''
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count('<mask>') == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = ' '.join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))])
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(' ')):
        predicted_token = predicted_token_bpe.replace('\u2581', ' ')
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(' {0}'.format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                ))
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                ))
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()
masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
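# Expected behaviour (a sketch, not a verified run; exact scores vary by environment):
# the top completions should be plausible French adjectives, e.g. the CamemBERT paper
# reports "délicieux" as the highest-scoring prediction for this prompt.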
| 339 | 0 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []
    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types])
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])
    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)
    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list, dtype=torch.int32, device=protein["aatype"].device, )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list, dtype=torch.int32, device=protein["aatype"].device, )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list, dtype=torch.float32, device=protein["aatype"].device, )
    protein_aatype = protein["aatype"].to(torch.long)
    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]
    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()
    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()
    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1
    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask
    return protein


def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
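# A minimal usage sketch (not part of the original module): the function expects a
# feature dict with an "aatype" tensor of per-residue amino-acid indices; index 0 is
# alanine in the standard restypes ordering.
#
#     protein = {"aatype": torch.zeros(8, dtype=torch.long)}  # 8 alanine residues (hypothetical input)
#     protein = make_atom14_masks(protein)
#     print(protein["atom14_atom_exists"].shape)  # torch.Size([8, 14])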
| 87 |
import math
import unittest
def is_prime(number: int) -> bool:
    '''simple docstring'''
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All remaining prime candidates are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
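# Example: is_prime(87) is False (87 = 3 * 29), while is_prime(89) is True.
# Trial division runs in O(sqrt(n)) thanks to the 6k +/- 1 stride.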
class TestIsPrime(unittest.TestCase):
    def test_primes(self) -> None:
        """simple docstring"""
self.assertTrue(is_prime(2))
self.assertTrue(is_prime(3))
self.assertTrue(is_prime(5))
self.assertTrue(is_prime(7))
self.assertTrue(is_prime(11))
self.assertTrue(is_prime(13))
self.assertTrue(is_prime(17))
self.assertTrue(is_prime(19))
self.assertTrue(is_prime(23))
self.assertTrue(is_prime(29))
    def test_not_primes(self) -> None:
        """simple docstring"""
        with self.assertRaises(AssertionError):
is_prime(-19)
self.assertFalse(
is_prime(0) , 'Zero doesn\'t have any positive factors, primes must have exactly two.' , )
self.assertFalse(
is_prime(1) , 'One only has 1 positive factor, primes must have exactly two.' , )
self.assertFalse(is_prime(2 * 2))
self.assertFalse(is_prime(2 * 3))
self.assertFalse(is_prime(3 * 3))
self.assertFalse(is_prime(3 * 5))
self.assertFalse(is_prime(3 * 5 * 7))
if __name__ == "__main__":
unittest.main()
| 339 | 0 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import (
        MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        LayoutLMv3ForQuestionAnswering,
        LayoutLMv3ForSequenceClassification,
        LayoutLMv3ForTokenClassification,
        LayoutLMv3Model,
    )
    from transformers.models.layoutlmv3.modeling_layoutlmv3 import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ModelTester:
    '''simple docstring'''

    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)
        config = LayoutLMv3Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        """simple docstring"""
        model = LayoutLMv3Model(config=config)
        model.to(torch_device)
        model.eval()
        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size))
        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = LayoutLMv3ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = LayoutLMv3ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        """simple docstring"""
        model = LayoutLMv3ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            """input_ids""": input_ids,
            """bbox""": bbox,
            """pixel_values""": pixel_values,
            """token_type_ids""": token_type_ids,
            """attention_mask""": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMv3ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False
    all_model_classes = (
        (
            LayoutLMv3Model,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3ForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"""document-question-answering""": LayoutLMv3ForQuestionAnswering, """feature-extraction""": LayoutLMv3Model}
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        """simple docstring"""
        return True

    def setUp(self):
        """simple docstring"""
        self.model_tester = LayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """simple docstring"""
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["""labels"""] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["""start_positions"""] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
                inputs_dict["""end_positions"""] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["""labels"""] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["""labels"""] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=torch_device, )
        return inputs_dict
    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMv3Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    '''simple docstring'''
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image
@require_torch
class LayoutLMv3ModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''

    @cached_property
    def default_image_processor(self):
        """simple docstring"""
        return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        """simple docstring"""
        model = LayoutLMv3Model.from_pretrained("""microsoft/layoutlmv3-base""").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="""pt""").pixel_values.to(torch_device)

        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device), bbox=bbox.to(torch_device), pixel_values=pixel_values.to(torch_device), )

        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1E-4))
| 88 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
UpperCAmelCase__ = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
UpperCAmelCase__ = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
UpperCAmelCase__ = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu ( datasets.Metric ):
    def _info( self ) -> MetricInfo:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token') , id='sequence'),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token') , id='sequence') , id='references'),
}) , )
    def _compute( self , references : List[List[List[str]]] , predictions : List[List[str]] , min_len : int = 1 , max_len : int = 4 , ) -> Dict[str, float]:
        """simple docstring"""
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references , hypotheses=predictions , min_len=min_len , max_len=max_len)
}
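if __name__ == "__main__":
    # Minimal self-check sketch: ``corpus_gleu`` is the NLTK routine wrapped by
    # ``_compute`` above; the toy token lists below are illustrative only.
    from nltk.translate import gleu_score as _gleu

    hyps = [["the", "cat", "sat", "on", "the", "mat"]]
    refs = [[["the", "cat", "is", "on", "the", "mat"]]]
    print(round(_gleu.corpus_gleu(list_of_references=refs, hypotheses=hyps), 2))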
| 339 | 0 |
'''simple docstring'''
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class __magic_name__ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
lowerCAmelCase : int = StableDiffusionControlNetImgaImgPipeline
lowerCAmelCase : Tuple = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
lowerCAmelCase : Tuple = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCAmelCase : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'control_image'} )
lowerCAmelCase : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __lowercase ( self : Any ):
torch.manual_seed(0 )
_a : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,)
torch.manual_seed(0 )
_a : Optional[Any] = ControlNetModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,in_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,cross_attention_dim=32 ,conditioning_embedding_out_channels=(16, 32) ,)
torch.manual_seed(0 )
_a : List[str] = DDIMScheduler(
beta_start=0.0_00_85 ,beta_end=0.0_12 ,beta_schedule='scaled_linear' ,clip_sample=_UpperCAmelCase ,set_alpha_to_one=_UpperCAmelCase ,)
torch.manual_seed(0 )
_a : List[str] = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,)
torch.manual_seed(0 )
_a : List[Any] = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
_a : str = CLIPTextModel(_UpperCAmelCase )
_a : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_a : Tuple = {
'unet': unet,
'controlnet': controlnet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __lowercase ( self : int ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : List[str]=0 ):
if str(_UpperCAmelCase ).startswith('mps' ):
_a : Optional[Any] = torch.manual_seed(_UpperCAmelCase )
else:
_a : Dict = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
_a : int = 2
_a : str = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) ,generator=_UpperCAmelCase ,device=torch.device(_UpperCAmelCase ) ,)
_a : int = floats_tensor(control_image.shape ,rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
_a : int = image.cpu().permute(0 ,2 ,3 ,1 )[0]
_a : List[Any] = Image.fromarray(np.uinta(_UpperCAmelCase ) ).convert('RGB' ).resize((64, 64) )
_a : List[str] = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
'image': image,
'control_image': control_image,
}
return inputs
def __lowercase ( self : Tuple ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() ,reason='XFormers attention is only available with CUDA and `xformers` installed' ,)
def __lowercase ( self : Optional[int] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def __lowercase ( self : Dict ):
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class __magic_name__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
lowerCAmelCase : Optional[int] = StableDiffusionControlNetImgaImgPipeline
lowerCAmelCase : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
lowerCAmelCase : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCAmelCase : List[Any] = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def __lowercase ( self : Tuple ):
torch.manual_seed(0 )
_a : Dict = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,)
torch.manual_seed(0 )
        def init_weights(m ):
            if isinstance(m , torch.nn.Conv2d ):
                torch.nn.init.normal_(m.weight )
                m.bias.data.fill_(1.0 )
_a : Union[str, Any] = ControlNetModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,in_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,cross_attention_dim=32 ,conditioning_embedding_out_channels=(16, 32) ,)
controlneta.controlnet_down_blocks.apply(_UpperCAmelCase )
torch.manual_seed(0 )
_a : Union[str, Any] = ControlNetModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,in_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,cross_attention_dim=32 ,conditioning_embedding_out_channels=(16, 32) ,)
controlneta.controlnet_down_blocks.apply(_UpperCAmelCase )
torch.manual_seed(0 )
_a : Optional[int] = DDIMScheduler(
beta_start=0.0_00_85 ,beta_end=0.0_12 ,beta_schedule='scaled_linear' ,clip_sample=_UpperCAmelCase ,set_alpha_to_one=_UpperCAmelCase ,)
torch.manual_seed(0 )
_a : List[str] = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,)
torch.manual_seed(0 )
_a : Dict = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
_a : Optional[int] = CLIPTextModel(_UpperCAmelCase )
_a : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_a : str = MultiControlNetModel([controlneta, controlneta] )
_a : Tuple = {
'unet': unet,
'controlnet': controlnet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __lowercase ( self : str ,_UpperCAmelCase : int ,_UpperCAmelCase : int=0 ):
if str(_UpperCAmelCase ).startswith('mps' ):
_a : List[str] = torch.manual_seed(_UpperCAmelCase )
else:
_a : Dict = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
_a : str = 2
_a : Union[str, Any] = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) ,generator=_UpperCAmelCase ,device=torch.device(_UpperCAmelCase ) ,),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) ,generator=_UpperCAmelCase ,device=torch.device(_UpperCAmelCase ) ,),
]
_a : str = floats_tensor(control_image[0].shape ,rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
_a : int = image.cpu().permute(0 ,2 ,3 ,1 )[0]
_a : Optional[Any] = Image.fromarray(np.uinta(_UpperCAmelCase ) ).convert('RGB' ).resize((64, 64) )
_a : Tuple = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
'image': image,
'control_image': control_image,
}
return inputs
def __lowercase ( self : Optional[int] ):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        scale = 10.0
        steps = 4
        inputs = self.get_dummy_inputs(torch_device )
        inputs['num_inference_steps'] = steps
        inputs['guidance_scale'] = scale
        output_1 = pipe(**inputs )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs['num_inference_steps'] = steps
        inputs['guidance_scale'] = scale
        output_2 = pipe(**inputs , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs['num_inference_steps'] = steps
        inputs['guidance_scale'] = scale
        output_3 = pipe(**inputs , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs['num_inference_steps'] = steps
        inputs['guidance_scale'] = scale
        output_4 = pipe(**inputs , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2 ) ) > 1E-3
        assert np.sum(np.abs(output_2 - output_3 ) ) > 1E-3
        assert np.sum(np.abs(output_3 - output_4 ) ) > 1E-3
def __lowercase ( self : Optional[Any] ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() ,reason='XFormers attention is only available with CUDA and `xformers` installed' ,)
def __lowercase ( self : List[Any] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def __lowercase ( self : Optional[Any] ):
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def __lowercase ( self : List[str] ):
_a : Optional[int] = self.get_dummy_components()
_a : Dict = self.pipeline_class(**_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(_UpperCAmelCase )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase ):
def __lowercase ( self : Optional[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self : Union[str, Any] ):
_a : Tuple = ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-canny' )
_a : Tuple = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,safety_checker=_UpperCAmelCase ,controlnet=_UpperCAmelCase )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
_a : Tuple = torch.Generator(device='cpu' ).manual_seed(0 )
_a : Union[str, Any] = 'evil space-punk bird'
_a : str = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' ).resize((512, 512) )
_a : Optional[int] = load_image(
'https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png' ).resize((512, 512) )
_a : Optional[Any] = pipe(
_UpperCAmelCase ,_UpperCAmelCase ,control_image=_UpperCAmelCase ,generator=_UpperCAmelCase ,output_type='np' ,num_inference_steps=50 ,strength=0.6 ,)
_a : Tuple = output.images[0]
assert image.shape == (512, 512, 3)
_a : Union[str, Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy' )
assert np.abs(expected_image - image ).max() < 9E-2
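# The same img2img ControlNet flow outside the test harness, as a sketch: the
# model ids match the slow test above, the class name is the public diffusers
# one, and the prompt/strength values are illustrative only.
#
#   controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
#   pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
#       "runwayml/stable-diffusion-v1-5", controlnet=controlnet, safety_checker=None)
#   out = pipe("evil space-punk bird", image, control_image=canny_image,
#              num_inference_steps=50, strength=0.6).images[0]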
| 89 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
MTaTokenizerFast = TaTokenizerFast
UpperCAmelCase__ = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mt5"] = [
"MT5EncoderModel",
"MT5ForConditionalGeneration",
"MT5ForQuestionAnswering",
"MT5Model",
"MT5PreTrainedModel",
"MT5Stack",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
UpperCAmelCase__ = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast},
module_spec=__spec__,
)
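# Sketch of what the ``_LazyModule`` indirection above provides: importing the
# package stays cheap, and the heavy submodules are only loaded on first
# attribute access (the checkpoint name below is illustrative):
#
#   from transformers import MT5ForConditionalGeneration  # resolved lazily
#   model = MT5ForConditionalGeneration.from_pretrained("google/mt5-small")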
| 339 | 0 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp ):
    """simple docstring"""
    if (
        (cp >= 0x4_e00 and cp <= 0x9_fff)
        or (cp >= 0x3_400 and cp <= 0x4_dbf)  #
        or (cp >= 0x20_000 and cp <= 0x2a_6df)  #
        or (cp >= 0x2a_700 and cp <= 0x2b_73f)  #
        or (cp >= 0x2b_740 and cp <= 0x2b_81f)  #
        or (cp >= 0x2b_820 and cp <= 0x2c_eaf)  #
        or (cp >= 0xf_900 and cp <= 0xf_aff)
        or (cp >= 0x2f_800 and cp <= 0x2f_a1f)  #
    ):  #
        return True
    return False
def is_chinese(word: str ):
    """simple docstring"""
    for char in word:
        char_code = ord(char )
        if not _is_chinese_char(char_code ):
            return 0
    return 1
def get_chinese_word(tokens: List[str] ):
    """simple docstring"""
    word_set = set()
    for token in tokens:
        chinese_word = len(token ) > 1 and is_chinese(token )
        if chinese_word:
            word_set.add(token )
    word_list = list(word_set )
    return word_list
def add_sub_symbol(bert_tokens: List[str] , chinese_word_set: set() ):
    """simple docstring"""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w ) for w in chinese_word_set] )
    bert_word = bert_tokens
    start, end = 0, len(bert_word )
    while start < end:
        single_word = True
        if is_chinese(bert_word[start] ):
            max_match_len = min(end - start , max_word_len )
            for i in range(max_match_len , 1 , -1 ):
                whole_word = ''.join(bert_word[start : start + i] )
                if whole_word in chinese_word_set:
                    for j in range(start + 1 , start + i ):
                        bert_word[j] = '##' + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines: List[str] , ltp_tokenizer: LTP , bert_tokenizer: BertTokenizer ):
    """simple docstring"""
    ltp_res = []
    for i in range(0 , len(lines ) , 100 ):
        res = ltp_tokenizer.seg(lines[i : i + 100] )[0]
        res = [get_chinese_word(r ) for r in res]
        ltp_res.extend(res )
    assert len(ltp_res ) == len(lines )
    bert_res = []
    for i in range(0 , len(lines ) , 100 ):
        res = bert_tokenizer(lines[i : i + 100] , add_special_tokens=True , truncation=True , max_length=512 )
        bert_res.extend(res['input_ids'] )
    assert len(bert_res ) == len(lines )
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res , ltp_res ):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id )
            input_tokens.append(token )
        input_tokens = add_sub_symbol(input_tokens , chinese_word )
        ref_id = []
        # We only save positions of Chinese subwords starting with "##",
        # i.e. pieces that are part of a whole word.
        for i, token in enumerate(input_tokens ):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token ) == 1 and _is_chinese_char(ord(clean_token ) ):
                    ref_id.append(i )
        ref_ids.append(ref_id )
    assert len(ref_ids ) == len(bert_res )
    return ref_ids
def main(args ):
    """simple docstring"""
    with open(args.file_name , 'r' , encoding='utf-8' ) as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line ) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp )  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert )
    ref_ids = prepare_ref(data , ltp_tokenizer , bert_tokenizer )
    with open(args.save_path , 'w' , encoding='utf-8' ) as f:
        data = [json.dumps(ref ) + '\n' for ref in ref_ids]
        f.writelines(data )
if __name__ == "__main__":
__A = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
)
parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")
    args = parser.parse_args()
main(args)
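# Illustrative sketch (hypothetical tokens) of what ``add_sub_symbol`` does:
# every non-initial character of an LTP-matched Chinese word is re-marked as a
# "##" continuation piece, so whole-word masking can treat the word as one unit.
#
#   bert_tokens = ["中", "国", "人"]
#   chinese_word_set = {"中国"}
#   add_sub_symbol(bert_tokens, chinese_word_set)  # -> ["中", "##国", "人"]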
| 90 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class OpenLlamaConfig( PretrainedConfig ):
    model_type = '''open-llama'''
    def __init__( self , vocab_size=10_00_00 , hidden_size=40_96 , intermediate_size=1_10_08 , num_hidden_layers=32 , num_attention_heads=32 , hidden_act="silu" , max_position_embeddings=20_48 , initializer_range=0.0_2 , rms_norm_eps=1E-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , tie_word_embeddings=False , use_memory_efficient_attention=True , hidden_dropout_prob=0.1 , attention_dropout_prob=0.1 , use_stable_embedding=True , shared_input_output_embedding=True , rope_scaling=None , **kwargs , ) -> None:
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.use_memory_efficient_attention = kwargs.pop(
            'use_memorry_efficient_attention' , use_memory_efficient_attention)
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )
    def _rope_scaling_validation( self ) -> None:
        """simple docstring"""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                F"got {self.rope_scaling}")
        rope_scaling_type = self.rope_scaling.get('type' , None)
        rope_scaling_factor = self.rope_scaling.get('factor' , None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float) or rope_scaling_factor <= 1.0:
            raise ValueError(F"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
| 339 | 0 |
"""simple docstring"""
from manim import *
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = Rectangle(height=0.5 , width=0.5)
SCREAMING_SNAKE_CASE_ : Dict = Rectangle(height=0.46 , width=0.46).set_stroke(width=0)
SCREAMING_SNAKE_CASE_ : Any = [mem.copy() for i in range(6)]
SCREAMING_SNAKE_CASE_ : Optional[int] = [mem.copy() for i in range(6)]
SCREAMING_SNAKE_CASE_ : Optional[int] = VGroup(*lowercase_).arrange(lowercase_ , buff=0)
SCREAMING_SNAKE_CASE_ : Optional[int] = VGroup(*lowercase_).arrange(lowercase_ , buff=0)
SCREAMING_SNAKE_CASE_ : List[Any] = VGroup(lowercase_ , lowercase_).arrange(lowercase_ , buff=0)
SCREAMING_SNAKE_CASE_ : List[str] = Text('''CPU''' , font_size=24)
SCREAMING_SNAKE_CASE_ : Optional[Any] = Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
cpu.move_to([-2.5, -0.5, 0])
self.add(lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = [mem.copy() for i in range(1)]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = VGroup(*lowercase_).arrange(lowercase_ , buff=0)
SCREAMING_SNAKE_CASE_ : List[str] = Text('''GPU''' , font_size=24)
SCREAMING_SNAKE_CASE_ : Optional[int] = Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
gpu.align_to(lowercase_ , lowercase_)
gpu.set_x(gpu.get_x() - 1)
self.add(lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = [mem.copy() for i in range(6)]
SCREAMING_SNAKE_CASE_ : Tuple = VGroup(*lowercase_).arrange(lowercase_ , buff=0)
SCREAMING_SNAKE_CASE_ : Optional[int] = Text('''Model''' , font_size=24)
SCREAMING_SNAKE_CASE_ : Tuple = Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
model.move_to([3, -1.0, 0])
self.play(
Create(lowercase_ , run_time=1) , Create(lowercase_ , run_time=1) , Create(lowercase_ , run_time=1) , )
SCREAMING_SNAKE_CASE_ : Optional[Any] = MarkupText(
F'First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.' , font_size=24 , )
SCREAMING_SNAKE_CASE_ : Dict = Square(side_length=2.2)
key.move_to([-5, 2, 0])
SCREAMING_SNAKE_CASE_ : Union[str, Any] = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0])
step_a.move_to([2, 2, 0])
self.play(Write(lowercase_ , run_time=2.5) , Write(lowercase_) , Write(lowercase_))
self.add(lowercase_)
SCREAMING_SNAKE_CASE_ : Any = []
SCREAMING_SNAKE_CASE_ : Any = []
SCREAMING_SNAKE_CASE_ : List[str] = []
for i, rect in enumerate(lowercase_):
SCREAMING_SNAKE_CASE_ : Any = Rectangle(height=0.46 , width=0.46).set_stroke(width=0.0).set_fill(lowercase_ , opacity=0.7)
cpu_target.move_to(lowercase_)
cpu_target.generate_target()
SCREAMING_SNAKE_CASE_ : Optional[int] = 0.46 / 4
SCREAMING_SNAKE_CASE_ : str = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT) , buff=0.02 , direction=lowercase_)
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=lowercase_ , buff=0.0)
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=lowercase_ , buff=0.0)
cpu_targs.append(lowercase_)
first_animations.append(rect.animate(run_time=0.5).set_stroke(lowercase_))
second_animations.append(MoveToTarget(lowercase_ , run_time=1.5))
self.play(*lowercase_)
self.play(*lowercase_)
self.wait()
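# Rendering note (a sketch, not part of the scene): with the community edition
# of manim installed, a scene like the one above is typically rendered via
#
#   manim -pql <this_file>.py <SceneClassName>
#
# where the file and scene class names are placeholders for however this
# module is saved and the class above is named.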
| 91 |
def validate_initial_digits(credit_card_number: str ) -> bool:
    '''simple docstring'''
    return credit_card_number.startswith(('34', '35', '37', '4', '5', '6') )
def luhn_validation(credit_card_number: str ) -> bool:
    '''simple docstring'''
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number ) - 2
    for i in range(half_len , -1 , -2 ):
        # double the value of every second digit
        digit = int(cc_number[i] )
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit ) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number ) - 1 , -1 , -2 ):
        total += int(cc_number[i] )
    return total % 10 == 0
def validate_credit_card_number(credit_card_number: str ) -> bool:
    '''simple docstring'''
    error_message = F"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(F"{error_message} it has nonnumerical characters." )
        return False
    if not 13 <= len(credit_card_number ) <= 16:
        print(F"{error_message} of its length." )
        return False
    if not validate_initial_digits(credit_card_number ):
        print(F"{error_message} of its first two digits." )
        return False
    if not luhn_validation(credit_card_number ):
        print(F"{error_message} it fails the Luhn check." )
        return False
    print(F"{credit_card_number} is a valid credit card number." )
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("4111111111111111")
validate_credit_card_number("32323")
| 339 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {"""vocab_file""": """sentencepiece.model"""}
UpperCamelCase__ = {
"""vocab_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/sentencepiece.model""",
},
}
UpperCamelCase__ = {
"""google/rembert""": 256,
}
class a__ ( snake_case__ ):
_a : Optional[int] = VOCAB_FILES_NAMES
_a : Dict = PRETRAINED_VOCAB_FILES_MAP
_a : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , _A , _A=False , _A=True , _A=True , _A="[CLS]" , _A="[SEP]" , _A="[UNK]" , _A="[SEP]" , _A="[PAD]" , _A="[CLS]" , _A="[MASK]" , **_A , ):
"""simple docstring"""
super().__init__(
do_lower_case=_A , remove_space=_A , keep_accents=_A , bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , **_A , )
__lowerCAmelCase = do_lower_case
__lowerCAmelCase = remove_space
__lowerCAmelCase = keep_accents
__lowerCAmelCase = vocab_file
__lowerCAmelCase = spm.SentencePieceProcessor()
self.sp_model.Load(_A )
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return len(self.sp_model )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
"""simple docstring"""
__lowerCAmelCase = self.__dict__.copy()
__lowerCAmelCase = None
return state
def __setstate__( self , _A ):
"""simple docstring"""
__lowerCAmelCase = d
__lowerCAmelCase = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def __SCREAMING_SNAKE_CASE( self , _A , _A=False ):
"""simple docstring"""
__lowerCAmelCase = self.sp_model.EncodeAsPieces(_A )
return pieces
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
return self.sp_model.PieceToId(_A )
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
return self.sp_model.IdToPiece(_A )
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
__lowerCAmelCase = self.sp_model.decode_pieces(_A )
return out_string
def __SCREAMING_SNAKE_CASE( self , _A , _A = None ):
"""simple docstring"""
__lowerCAmelCase = [self.sep_token_id]
__lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __SCREAMING_SNAKE_CASE( self , _A , _A = None , _A = False ):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_A )) + [1] + ([0] * len(_A )) + [1]
return [1] + ([0] * len(_A )) + [1]
def __SCREAMING_SNAKE_CASE( self , _A , _A = None ):
"""simple docstring"""
__lowerCAmelCase = [self.sep_token_id]
__lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __SCREAMING_SNAKE_CASE( self , _A , _A = None ):
"""simple docstring"""
if not os.path.isdir(_A ):
logger.error("Vocabulary path ({}) should be a directory".format(_A ) )
return
__lowerCAmelCase = os.path.join(
_A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ):
copyfile(self.vocab_file , _A )
return (out_vocab_file,)
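# Minimal usage sketch, assuming the class above is exported under its public
# name RemBertTokenizer (the checkpoint id comes from the
# PRETRAINED_VOCAB_FILES_MAP defined above):
#
#   tokenizer = RemBertTokenizer.from_pretrained("google/rembert")
#   ids = tokenizer.encode("Hello world")  # ids wrapped in [CLS] ... [SEP]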
| 92 |
from functools import reduce
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def solution(n: str = N ) -> int:
    '''simple docstring'''
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x , y : str(int(x ) * int(y ) ) , n[i : i + 13] ) )
        for i in range(len(n ) - 12 ) )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 339 | 0 |
'''simple docstring'''
import math
import sys
def read_file_binary(file_path: str ) -> str:
    """simple docstring"""
    result = ''''''
    try:
        with open(file_path , '''rb''' ) as binary_file:
            data = binary_file.read()
            for dat in data:
                curr_byte = F'''{dat:08b}'''
                result += curr_byte
            return result
    except OSError:
        print('''File not accessible''' )
        sys.exit()
def decompress_data(data_bits: str ) -> str:
    """simple docstring"""
    lexicon = {'''0''': '''0''', '''1''': '''1'''}
    result, curr_string = '''''', ''''''
    index = len(lexicon )
    for i in range(len(data_bits ) ):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + '''0'''
        if math.log2(index ).is_integer():
            new_lex = {}
            for curr_key in list(lexicon ):
                new_lex['''0''' + curr_key] = lexicon.pop(curr_key )
            lexicon = new_lex
        lexicon[bin(index )[2:]] = last_match_id + '''1'''
        index += 1
        curr_string = ''''''
    return result
def write_file_binary(file_path: str , to_write: str ) -> None:
    """simple docstring"""
    byte_length = 8
    try:
        with open(file_path , '''wb''' ) as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0 , len(to_write ) , byte_length )
            ]
            if len(result_byte_array[-1] ) % byte_length == 0:
                result_byte_array.append('''10000000''' )
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1] ) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem , 2 ).to_bytes(1 , byteorder='''big''' ) )
    except OSError:
        print('''File not accessible''' )
        sys.exit()
def remove_prefix(data_bits: str ) -> str:
    """simple docstring"""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress(source_path: str , destination_path: str ) -> None:
    """simple docstring"""
    data_bits = read_file_binary(source_path )
    data_bits = remove_prefix(data_bits )
    decompressed = decompress_data(data_bits )
    write_file_binary(destination_path , decompressed )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
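# Illustrative invocation (both file names are placeholders); the input is
# expected to be the bit-stream written by the companion LZW compressor:
#
#   python lempel_ziv_decompress.py compressed.bin restored.txt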
| 93 |
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve(matrix: Matrix , vector: Matrix ) -> Matrix:
    '''simple docstring'''
    size = len(matrix )
    augmented: Matrix = [[0 for _ in range(size + 1 )] for _ in range(size )]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float
    for row in range(size ):
        for col in range(size ):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col] ), row2) for row2 in range(row , size ) )[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        for row2 in range(row + 1 , size ):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1 , size + 1 ):
                augmented[row2][col2] -= augmented[row][col2] * ratio
        row += 1
        col += 1
    # back substitution
    for col in range(1 , size ):
        for row in range(col ):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col , size + 1 ):
                augmented[row][col2] -= augmented[col][col2] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(size )
    ]
def interpolate(y_list: list[int] ) -> Callable[[int], int]:
    '''simple docstring'''
    size = len(y_list )
    matrix: Matrix = [[0 for _ in range(size )] for _ in range(size )]
    vector: Matrix = [[0] for _ in range(size )]
    x_val: int
    y_val: int
    col: int
    for x_val, y_val in enumerate(y_list ):
        for col in range(size ):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(matrix , vector )
    def interpolated_func(var: int ) -> int:
        return sum(
            round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
            for x_val in range(size ) )
    return interpolated_func
def question_function(variable: int ) -> int:
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def solution(func: Callable[[int], int] = question_function , order: int = 10 ) -> int:
    '''simple docstring'''
    data_points = [func(x_val ) for x_val in range(1 , order + 1 )]
    polynomials = [
        interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
    ]
    ret = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val ) == poly(x_val ):
            x_val += 1
        ret += poly(x_val )
    return ret
if __name__ == "__main__":
print(f"""{solution() = }""")
| 339 | 0 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
snake_case : Any = '''▁'''
snake_case : Optional[Any] = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class _snake_case ( _snake_case , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = BigBirdTokenizer
SCREAMING_SNAKE_CASE__ = BigBirdTokenizerFast
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = True
def SCREAMING_SNAKE_CASE__ ( self ):
super().setUp()
a :Union[str, Any] = self.tokenizer_class(_lowerCamelCase , keep_accents=_lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Dict = '''<s>'''
a :int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCamelCase ) , _lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
a :int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''[MASK]''' )
self.assertEqual(len(_lowerCamelCase ) , 1004 )
def SCREAMING_SNAKE_CASE__ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def SCREAMING_SNAKE_CASE__ ( self ):
if not self.test_rust_tokenizer:
return
a :Union[str, Any] = self.get_tokenizer()
a :Dict = self.get_rust_tokenizer()
a :Union[str, Any] = '''I was born in 92000, and this is falsé.'''
a :List[str] = tokenizer.tokenize(_lowerCamelCase )
a :Union[str, Any] = rust_tokenizer.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
a :List[Any] = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
a :str = rust_tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
a :str = self.get_rust_tokenizer()
a :Optional[Any] = tokenizer.encode(_lowerCamelCase )
a :Dict = rust_tokenizer.encode(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
a :str = BigBirdTokenizer(_lowerCamelCase , keep_accents=_lowerCamelCase )
a :str = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_lowerCamelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , [285, 46, 10, 170, 382] , )
a :List[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_lowerCamelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
a :List[Any] = tokenizer.convert_tokens_to_ids(_lowerCamelCase )
self.assertListEqual(
_lowerCamelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
a :Union[str, Any] = tokenizer.convert_ids_to_tokens(_lowerCamelCase )
self.assertListEqual(
_lowerCamelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ):
return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
a :int = '''Hello World!'''
a :Optional[int] = [65, 1_8536, 2260, 101, 66]
self.assertListEqual(_lowerCamelCase , self.big_tokenizer.encode(_lowerCamelCase ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
a :Union[str, Any] = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
# fmt: off
a :str = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 3_4324, 497, 391, 408, 1_1342, 1244, 385, 100, 938, 985, 456, 574, 362, 1_2597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(_lowerCamelCase , self.big_tokenizer.encode(_lowerCamelCase ) )
@require_torch
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
a :Union[str, Any] = list(self.big_tokenizer.get_vocab().keys() )[:10]
a :Dict = ''' '''.join(_lowerCamelCase )
a :Tuple = self.big_tokenizer.encode_plus(_lowerCamelCase , return_tensors='''pt''' , return_token_type_ids=_lowerCamelCase )
a :int = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=_lowerCamelCase )
a :Union[str, Any] = BigBirdConfig(attention_type='''original_full''' )
a :List[str] = BigBirdModel(_lowerCamelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_lowerCamelCase )
model(**_lowerCamelCase )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
a :int = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
a :str = tokenizer.decode(tokenizer('''Paris is the [MASK].''' ).input_ids )
self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''' )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
# fmt: off
a :List[Any] = {'''input_ids''': [[65, 3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114, 66], [65, 448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCamelCase , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
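# The decode round-trip exercised in the slow test above, as a standalone
# sketch against the same public checkpoint:
#
#   tok = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
#   tok.decode(tok("Paris is the [MASK].").input_ids)
#   # -> "[CLS] Paris is the[MASK].[SEP]"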
| 94 |
from __future__ import annotations
def A ( _UpperCAmelCase : list[int] ) -> bool:
'''simple docstring'''
return len(set(_UpperCAmelCase ) ) == len(_UpperCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
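# Example of the set-length comparison above: for [1, 2, 2],
# len({1, 2}) == 2 != 3 == len([1, 2, 2]), so the function returns False;
# it returns True exactly when every element of the list is distinct.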
| 339 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
UpperCAmelCase : Tuple = logging.get_logger(__name__)
UpperCAmelCase : str = {
"""post_extract_proj""": """feature_projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.upsample.0""": """encoder.upsample.projection""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
def _A ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
for attribute in key.split("." ):
a__ : Tuple =getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if weight_type is not None:
a__ : Any =getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).shape
else:
a__ : Optional[Any] =hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
a__ : int =value
elif weight_type == "weight_g":
a__ : Optional[Any] =value
elif weight_type == "weight_v":
a__ : int =value
elif weight_type == "bias":
a__ : Any =value
else:
a__ : Optional[int] =value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def _A ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
a__ : List[str] =[]
a__ : Dict =fairseq_model.state_dict()
a__ : Union[str, Any] =hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
a__ : Optional[Any] =False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == "group" , )
a__ : List[str] =True
else:
for key, mapped_key in MAPPING.items():
a__ : str ="sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
a__ : Tuple =True
if "*" in mapped_key:
a__ : str =name.split(SCREAMING_SNAKE_CASE )[0].split("." )[-2]
a__ : List[str] =mapped_key.replace("*" , SCREAMING_SNAKE_CASE )
if "weight_g" in name:
a__ : List[str] ="weight_g"
elif "weight_v" in name:
a__ : Optional[int] ="weight_v"
elif "weight" in name:
a__ : Tuple ="weight"
elif "bias" in name:
a__ : Optional[int] ="bias"
else:
a__ : List[Any] =None
set_recursively(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE )
logger.warning(f'''Unused weights: {unused_weights}''' )
def _A ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
a__ : int =full_name.split("conv_layers." )[-1]
a__ : Union[str, Any] =name.split("." )
a__ : Tuple =int(items[0] )
a__ : Any =int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
a__ : Any =value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
a__ : Any =value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
a__ : int =value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
a__ : str =value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(SCREAMING_SNAKE_CASE )
def _A ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
a__ : Optional[int] =SEWConfig()
if is_finetuned:
a__ : Optional[Any] =model.wav_encoder.wav_model.cfg
else:
a__ : int =model.cfg
a__ : Optional[Any] =fs_config.conv_bias
a__ : List[Any] =eval(fs_config.conv_feature_layers )
a__ : Dict =[x[0] for x in conv_layers]
a__ : str =[x[1] for x in conv_layers]
a__ : Tuple =[x[2] for x in conv_layers]
a__ : List[str] ="gelu"
a__ : int ="layer" if fs_config.extractor_mode == "layer_norm" else "group"
a__ : Optional[Any] =0.0
a__ : List[str] =fs_config.activation_fn.name
a__ : Union[str, Any] =fs_config.encoder_embed_dim
a__ : Any =0.0_2
a__ : Optional[Any] =fs_config.encoder_ffn_embed_dim
a__ : Tuple =1e-5
a__ : Union[str, Any] =fs_config.encoder_layerdrop
a__ : List[Any] =fs_config.encoder_attention_heads
a__ : int =fs_config.conv_pos_groups
a__ : int =fs_config.conv_pos
a__ : Optional[Any] =len(SCREAMING_SNAKE_CASE )
a__ : Optional[Any] =fs_config.encoder_layers
a__ : Tuple =fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
a__ : Any =model.cfg
a__ : Optional[Any] =fs_config.final_dropout
a__ : Optional[int] =fs_config.layerdrop
a__ : Optional[Any] =fs_config.activation_dropout
a__ : Dict =fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
a__ : Tuple =fs_config.attention_dropout
a__ : Tuple =fs_config.dropout_input
a__ : List[str] =fs_config.dropout
a__ : Optional[int] =fs_config.mask_channel_length
a__ : List[str] =fs_config.mask_channel_prob
a__ : Any =fs_config.mask_length
a__ : Union[str, Any] =fs_config.mask_prob
a__ : List[str] ="Wav2Vec2FeatureExtractor"
a__ : List[str] ="Wav2Vec2CTCTokenizer"
return config
@torch.no_grad()
def _A ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : str=None , SCREAMING_SNAKE_CASE : Dict=None , SCREAMING_SNAKE_CASE : List[str]=True ):
"""simple docstring"""
if is_finetuned:
a__ , a__ , a__ : Tuple =fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
a__ , a__ , a__ : Dict =fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
a__ : List[str] =SEWConfig.from_pretrained(SCREAMING_SNAKE_CASE )
else:
a__ : Optional[int] =convert_config(model[0] , SCREAMING_SNAKE_CASE )
a__ : int =model[0].eval()
a__ : List[str] =True if config.feat_extract_norm == "layer" else False
a__ : Optional[int] =WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , )
if is_finetuned:
if dict_path:
a__ : List[Any] =Dictionary.load(SCREAMING_SNAKE_CASE )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
a__ : Any =target_dict.pad_index
a__ : Any =target_dict.bos_index
a__ : List[Any] =target_dict.pad_index
a__ : Dict =target_dict.bos_index
a__ : Optional[int] =target_dict.eos_index
a__ : Dict =len(target_dict.symbols )
a__ : List[str] =os.path.join(SCREAMING_SNAKE_CASE , "vocab.json" )
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(SCREAMING_SNAKE_CASE ) )
return
os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
with open(SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(target_dict.indices , SCREAMING_SNAKE_CASE )
a__ : Optional[Any] =WavaVecaCTCTokenizer(
SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=SCREAMING_SNAKE_CASE , )
a__ : List[Any] =WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE )
processor.save_pretrained(SCREAMING_SNAKE_CASE )
a__ : Optional[int] =SEWForCTC(SCREAMING_SNAKE_CASE )
else:
a__ : int =SEWModel(SCREAMING_SNAKE_CASE )
feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE )
recursively_load_weights(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
hf_model.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
UpperCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
UpperCAmelCase : Tuple = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 95 |
import os
UpperCAmelCase__ = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
def parse_roman_numerals ( numerals : str ) -> int:
    '''simple docstring'''
    total_value = 0
    index = 0
    while index < len(numerals ) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals ( num : int ) -> str:
    '''simple docstring'''
    numerals = ''
    m_count = num // 1_000
numerals += m_count * "M"
num %= 1_000
    c_count = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
    x_count = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
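# A small round-trip check for the two helpers above (a sketch, not part of
# the original solution): "MCMXC" is already in minimal subtractive form,
# while the padded "LXXXX" shrinks to "XC".
def _check_round_trip() -> None:
    assert parse_roman_numerals("MCMXC") == 1990
    assert generate_roman_numerals(1990) == "MCMXC"
    assert generate_roman_numerals(parse_roman_numerals("LXXXX")) == "XC"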
def A ( _UpperCAmelCase : str = "/p089_roman.txt" ) -> int:
'''simple docstring'''
_UpperCAmelCase = 0
with open(os.path.dirname(_UpperCAmelCase ) + roman_numerals_filename ) as filea:
_UpperCAmelCase = filea.readlines()
for line in lines:
_UpperCAmelCase = line.strip()
_UpperCAmelCase = parse_roman_numerals(_UpperCAmelCase )
_UpperCAmelCase = generate_roman_numerals(_UpperCAmelCase )
savings += len(_UpperCAmelCase ) - len(_UpperCAmelCase )
return savings
if __name__ == "__main__":
print(f"""{solution() = }""")
| 339 | 0 |
"""simple docstring"""
from __future__ import annotations
def _snake_case ( electron_conc , hole_conc , intrinsic_conc , ):
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif electron_conc < 0:
raise ValueError('Electron concentration cannot be negative in a semiconductor' )
elif hole_conc < 0:
raise ValueError('Hole concentration cannot be negative in a semiconductor' )
elif intrinsic_conc < 0:
raise ValueError(
'Intrinsic concentration cannot be negative in a semiconductor' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
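# A minimal standalone sketch of the mass-action law (n * p = ni**2) that the
# branches above solve for each unknown; the names n, p and ni are
# illustrative only and not part of the original snippet.
def _mass_action_example() -> float:
    n, ni = 25.0, 100.0
    return ni**2 / n  # the hole concentration implied by n * p = ni**2, i.e. 400.0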
if __name__ == "__main__":
import doctest
doctest.testmod() | 96 |
import requests
from bs4 import BeautifulSoup
def get_citation ( base_url : str , params : dict ) -> str:
    '''simple docstring'''
    soup = BeautifulSoup(requests.get(base_url , params=params ).content , 'html.parser' )
    div = soup.find('div' , attrs={'class': 'gs_ri'} )
    anchors = div.find('div' , attrs={'class': 'gs_fl'} ).find_all('a' )
return anchors[2].get_text()
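# A cautionary note on the scrape above: anchors[2] assumes the "Cited by ..."
# link is the third anchor inside Scholar's gs_fl footer row; that position
# can shift whenever Google changes the page markup.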
if __name__ == "__main__":
UpperCAmelCase__ = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 339 | 0 |
'''simple docstring'''
def a ( __a , __b ) -> int:
    '''simple docstring'''
    if len(__a ) != len(__b ):
        raise ValueError('''String lengths must match!''' )
    count = 0
    for char_a, char_b in zip(__a , __b ):
        if char_a != char_b:
count += 1
return count
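# A hedged one-pass alternative with the same behaviour as the loop above for
# equal-length inputs, shown purely for comparison.
def _hamming_alt(string_a: str, string_b: str) -> int:
    if len(string_a) != len(string_b):
        raise ValueError("String lengths must match!")
    return sum(char_a != char_b for char_a, char_b in zip(string_a, string_b))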
if __name__ == "__main__":
import doctest
doctest.testmod() | 97 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class __lowerCAmelCase ( unittest.TestCase ):
def __init__( self : Optional[Any] , A : Dict , A : Union[str, Any]=13 , A : Dict=7 , A : Dict=True , A : Tuple=True , A : Union[str, Any]=True , A : int=True , A : Optional[int]=99 , A : List[str]=32 , A : List[Any]=5 , A : int=4 , A : Any=37 , A : Optional[int]="gelu" , A : Optional[Any]=0.1 , A : Any=0.1 , A : Union[str, Any]=5_12 , A : int=16 , A : List[str]=2 , A : Union[str, Any]=0.0_2 , A : Union[str, Any]=4 , ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_attention_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_choices
def _lowerCamelCase ( self : Optional[Any]) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCAmelCase = None
if self.use_attention_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length])
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_UpperCAmelCase = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _lowerCamelCase ( self : List[Any]) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class __lowerCAmelCase ( A , unittest.TestCase ):
UpperCamelCase = True
UpperCamelCase = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _lowerCamelCase ( self : Optional[int]) -> Any:
"""simple docstring"""
_UpperCAmelCase = FlaxRoFormerModelTester(self)
@slow
def _lowerCamelCase ( self : List[Any]) -> Dict:
"""simple docstring"""
for model_class_name in self.all_model_classes:
_UpperCAmelCase = model_class_name.from_pretrained('junnyu/roformer_chinese_small' , from_pt=A)
_UpperCAmelCase = model(np.ones((1, 1)))
self.assertIsNotNone(A)
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self : List[Any]) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base')
_UpperCAmelCase = jnp.array([[0, 1, 2, 3, 4, 5]])
_UpperCAmelCase = model(A)[0]
_UpperCAmelCase = 5_00_00
_UpperCAmelCase = (1, 6, vocab_size)
self.assertEqual(output.shape , A)
_UpperCAmelCase = jnp.array(
[[[-0.1_2_0_5, -1.0_2_6_5, 0.2_9_2_2], [-1.5_1_3_4, 0.1_9_7_4, 0.1_5_1_9], [-5.0_1_3_5, -3.9_0_0_3, -0.8_4_0_4]]])
self.assertTrue(jnp.allclose(output[:, :3, :3] , A , atol=1E-4))
| 339 | 0 |
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
snake_case__ = StableDiffusionDiffEditPipeline
snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
snake_case__ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
snake_case__ = frozenset([] )
def __lowerCAmelCase ( self : int ):
torch.manual_seed(0 )
UpperCAmelCase__ = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,attention_head_dim=(2, 4) ,use_linear_projection=lowerCamelCase__ ,)
UpperCAmelCase__ = DDIMScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule='scaled_linear' ,clip_sample=lowerCamelCase__ ,set_alpha_to_one=lowerCamelCase__ ,)
UpperCAmelCase__ = DDIMInverseScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule='scaled_linear' ,clip_sample=lowerCamelCase__ ,set_alpha_to_zero=lowerCamelCase__ ,)
torch.manual_seed(0 )
UpperCAmelCase__ = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,sample_size=128 ,)
torch.manual_seed(0 )
UpperCAmelCase__ = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,hidden_act='gelu' ,projection_dim=512 ,)
UpperCAmelCase__ = CLIPTextModel(lowerCamelCase__ )
UpperCAmelCase__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
UpperCAmelCase__ = {
'unet': unet,
'scheduler': scheduler,
'inverse_scheduler': inverse_scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Union[str, Any]=0 ):
UpperCAmelCase__ = floats_tensor((1, 16, 16) ,rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
UpperCAmelCase__ = floats_tensor((1, 2, 4, 16, 16) ,rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
if str(lowerCamelCase__ ).startswith('mps' ):
UpperCAmelCase__ = torch.manual_seed(lowerCamelCase__ )
else:
UpperCAmelCase__ = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
UpperCAmelCase__ = {
'prompt': 'a dog and a newt',
'mask_image': mask,
'image_latents': latents,
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : str=0 ):
UpperCAmelCase__ = floats_tensor((1, 3, 32, 32) ,rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
UpperCAmelCase__ = image.cpu().permute(0 ,2 ,3 ,1 )[0]
UpperCAmelCase__ = Image.fromarray(np.uinta(lowerCamelCase__ ) ).convert('RGB' )
if str(lowerCamelCase__ ).startswith('mps' ):
UpperCAmelCase__ = torch.manual_seed(lowerCamelCase__ )
else:
UpperCAmelCase__ = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
UpperCAmelCase__ = {
'image': image,
'source_prompt': 'a cat and a frog',
'target_prompt': 'a dog and a newt',
'generator': generator,
'num_inference_steps': 2,
'num_maps_per_mask': 2,
'mask_encode_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : str ,lowerCamelCase__ : Any=0 ):
UpperCAmelCase__ = floats_tensor((1, 3, 32, 32) ,rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
UpperCAmelCase__ = image.cpu().permute(0 ,2 ,3 ,1 )[0]
UpperCAmelCase__ = Image.fromarray(np.uinta(lowerCamelCase__ ) ).convert('RGB' )
if str(lowerCamelCase__ ).startswith('mps' ):
UpperCAmelCase__ = torch.manual_seed(lowerCamelCase__ )
else:
UpperCAmelCase__ = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
UpperCAmelCase__ = {
'image': image,
'prompt': 'a cat and a frog',
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'decode_latents': True,
'output_type': 'numpy',
}
return inputs
def __lowerCAmelCase ( self : int ):
if not hasattr(self.pipeline_class ,'_optional_components' ):
return
UpperCAmelCase__ = self.get_dummy_components()
UpperCAmelCase__ = self.pipeline_class(**lowerCamelCase__ )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
UpperCAmelCase__ = self.get_dummy_inputs(lowerCamelCase__ )
UpperCAmelCase__ = pipe(**lowerCamelCase__ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowerCamelCase__ )
UpperCAmelCase__ = self.pipeline_class.from_pretrained(lowerCamelCase__ )
pipe_loaded.to(lowerCamelCase__ )
pipe_loaded.set_progress_bar_config(disable=lowerCamelCase__ )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(lowerCamelCase__ ,lowerCamelCase__ ) is None ,f'''`{optional_component}` did not stay set to None after loading.''' ,)
UpperCAmelCase__ = self.get_dummy_inputs(lowerCamelCase__ )
UpperCAmelCase__ = pipe_loaded(**lowerCamelCase__ )[0]
UpperCAmelCase__ = np.abs(output - output_loaded ).max()
self.assertLess(lowerCamelCase__ ,1e-4 )
def __lowerCAmelCase ( self : Union[str, Any] ):
UpperCAmelCase__ = 'cpu'
UpperCAmelCase__ = self.get_dummy_components()
UpperCAmelCase__ = self.pipeline_class(**lowerCamelCase__ )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCAmelCase__ = self.get_dummy_mask_inputs(lowerCamelCase__ )
UpperCAmelCase__ = pipe.generate_mask(**lowerCamelCase__ )
UpperCAmelCase__ = mask[0, -3:, -3:]
self.assertEqual(mask.shape ,(1, 16, 16) )
UpperCAmelCase__ = np.array([0] * 9 )
UpperCAmelCase__ = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCamelCase__ ,1e-3 )
self.assertEqual(mask[0, -3, -4] ,0 )
def __lowerCAmelCase ( self : List[Any] ):
UpperCAmelCase__ = 'cpu'
UpperCAmelCase__ = self.get_dummy_components()
UpperCAmelCase__ = self.pipeline_class(**lowerCamelCase__ )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCAmelCase__ = self.get_dummy_inversion_inputs(lowerCamelCase__ )
UpperCAmelCase__ = pipe.invert(**lowerCamelCase__ ).images
UpperCAmelCase__ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape ,(2, 32, 32, 3) )
UpperCAmelCase__ = np.array(
[0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] ,)
UpperCAmelCase__ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCamelCase__ ,1e-3 )
def __lowerCAmelCase ( self : Any ):
super().test_inference_batch_single_identical(expected_max_diff=5e-3 )
def __lowerCAmelCase ( self : List[str] ):
UpperCAmelCase__ = 'cpu'
UpperCAmelCase__ = self.get_dummy_components()
UpperCAmelCase__ = {'beta_start': 0.0_0_0_8_5, 'beta_end': 0.0_1_2, 'beta_schedule': 'scaled_linear'}
UpperCAmelCase__ = DPMSolverMultistepScheduler(**lowerCamelCase__ )
UpperCAmelCase__ = DPMSolverMultistepInverseScheduler(**lowerCamelCase__ )
UpperCAmelCase__ = self.pipeline_class(**lowerCamelCase__ )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCAmelCase__ = self.get_dummy_inversion_inputs(lowerCamelCase__ )
UpperCAmelCase__ = pipe.invert(**lowerCamelCase__ ).images
UpperCAmelCase__ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape ,(2, 32, 32, 3) )
UpperCAmelCase__ = np.array(
[0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] ,)
UpperCAmelCase__ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCamelCase__ ,1e-3 )
@require_torch_gpu
@slow
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : List[str] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def __lowerCAmelCase ( cls : int ):
UpperCAmelCase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' )
UpperCAmelCase__ = raw_image.convert('RGB' ).resize((768, 768) )
UpperCAmelCase__ = raw_image
def __lowerCAmelCase ( self : int ):
UpperCAmelCase__ = torch.manual_seed(0 )
UpperCAmelCase__ = StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1' ,safety_checker=lowerCamelCase__ ,torch_dtype=torch.floataa )
UpperCAmelCase__ = DDIMScheduler.from_config(pipe.scheduler.config )
UpperCAmelCase__ = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCAmelCase__ = 'a bowl of fruit'
UpperCAmelCase__ = 'a bowl of pears'
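        # DiffEdit proceeds in three stages: generate_mask contrasts noise
        # predictions for the source vs. target prompt to locate editable
        # pixels, invert recovers partially-noised latents via DDIM inversion,
        # and the final pipeline call denoises those latents under the target
        # prompt inside the masked region only.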
UpperCAmelCase__ = pipe.generate_mask(
image=self.raw_image ,source_prompt=lowerCamelCase__ ,target_prompt=lowerCamelCase__ ,generator=lowerCamelCase__ ,)
UpperCAmelCase__ = pipe.invert(
prompt=lowerCamelCase__ ,image=self.raw_image ,inpaint_strength=0.7 ,generator=lowerCamelCase__ ).latents
UpperCAmelCase__ = pipe(
prompt=lowerCamelCase__ ,mask_image=lowerCamelCase__ ,image_latents=lowerCamelCase__ ,generator=lowerCamelCase__ ,negative_prompt=lowerCamelCase__ ,inpaint_strength=0.7 ,output_type='numpy' ,).images[0]
UpperCAmelCase__ = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
def __lowerCAmelCase ( self : Optional[Any] ):
UpperCAmelCase__ = torch.manual_seed(0 )
UpperCAmelCase__ = StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1' ,safety_checker=lowerCamelCase__ ,torch_dtype=torch.floataa )
UpperCAmelCase__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
UpperCAmelCase__ = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCAmelCase__ = 'a bowl of fruit'
UpperCAmelCase__ = 'a bowl of pears'
UpperCAmelCase__ = pipe.generate_mask(
image=self.raw_image ,source_prompt=lowerCamelCase__ ,target_prompt=lowerCamelCase__ ,generator=lowerCamelCase__ ,)
UpperCAmelCase__ = pipe.invert(
prompt=lowerCamelCase__ ,image=self.raw_image ,inpaint_strength=0.7 ,generator=lowerCamelCase__ ,num_inference_steps=25 ,).latents
UpperCAmelCase__ = pipe(
prompt=lowerCamelCase__ ,mask_image=lowerCamelCase__ ,image_latents=lowerCamelCase__ ,generator=lowerCamelCase__ ,negative_prompt=lowerCamelCase__ ,inpaint_strength=0.7 ,num_inference_steps=25 ,output_type='numpy' ,).images[0]
UpperCAmelCase__ = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
| 98 |
UpperCAmelCase__ = {}
def _calculate ( days : int , absent : int , late : int ) -> int:
'''simple docstring'''
# if we are absent twice, or late 3 consecutive days,
# no further prize strings are possible
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
    key = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
    state_late = _calculate(days - 1 , absent , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
    state_absent = _calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
    state_ontime = _calculate(days - 1 , absent , 0 )
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
return prizestrings
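# A hedged sanity check for the memoised recursion above: the 2-day count is
# the 3**2 possible strings minus the single invalid string "AA", and the
# 4-day count of 43 is the worked example quoted in the Project Euler 191
# problem statement.
def _check_prize_strings() -> None:
    assert _calculate(2, 0, 0) == 8
    assert _calculate(4, 0, 0) == 43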
def solution ( days : int = 30 ) -> int:
    '''simple docstring'''
    return _calculate(days , absent=0 , late=0 )
if __name__ == "__main__":
print(solution())
| 339 | 0 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
lowercase : List[Any] = logging.get_logger(__name__)
lowercase : Optional[int] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
lowercase : List[Any] = {
"""vocab_file""": {
"""allegro/herbert-base-cased""": """https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"""
},
"""merges_file""": {
"""allegro/herbert-base-cased""": """https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"""
},
}
lowercase : Any = {"""allegro/herbert-base-cased""": 5_1_4}
lowercase : str = {}
class A__ ( __UpperCAmelCase ):
"""simple docstring"""
__A : Any = VOCAB_FILES_NAMES
__A : str = PRETRAINED_VOCAB_FILES_MAP
__A : Optional[int] = PRETRAINED_INIT_CONFIGURATION
__A : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A : Tuple = HerbertTokenizer
def __init__( self , lowercase=None , lowercase=None , lowercase=None , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , lowercase="</s>" , **lowercase , ) -> List[Any]:
'''simple docstring'''
super().__init__(
lowercase , lowercase , tokenizer_file=lowercase , cls_token=lowercase , unk_token=lowercase , pad_token=lowercase , mask_token=lowercase , sep_token=lowercase , **lowercase , )
def __lowercase ( self , lowercase , lowercase = None) -> List[int]:
'''simple docstring'''
a__ : str = [self.cls_token_id]
a__ : Tuple = [self.sep_token_id]
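        # single sequence: <s> tokens </s>; pair of sequences:
        # <s> tokens_a </s> tokens_b </s>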
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __lowercase ( self , lowercase , lowercase = None , lowercase = False) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase , token_ids_a=lowercase , already_has_special_tokens=lowercase)
if token_ids_a is None:
return [1] + ([0] * len(lowercase)) + [1]
return [1] + ([0] * len(lowercase)) + [1] + ([0] * len(lowercase)) + [1]
def __lowercase ( self , lowercase , lowercase = None) -> List[int]:
'''simple docstring'''
a__ : Optional[int] = [self.sep_token_id]
a__ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def __lowercase ( self , lowercase , lowercase = None) -> Tuple[str]:
'''simple docstring'''
a__ : Any = self._tokenizer.model.save(lowercase , name=lowercase)
return tuple(lowercase)
| 99 |
import os
import sys
import unittest
UpperCAmelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
UpperCAmelCase__ = os.path.join(git_repo_path, "src", "diffusers")
class __lowerCAmelCase ( unittest.TestCase ):
def _lowerCamelCase ( self : Tuple) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = find_backend(' if not is_torch_available():')
self.assertEqual(A , 'torch')
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
_UpperCAmelCase = find_backend(' if not (is_torch_available() and is_transformers_available()):')
self.assertEqual(A , 'torch_and_transformers')
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
_UpperCAmelCase = find_backend(
' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):')
self.assertEqual(A , 'torch_and_transformers_and_onnx')
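        # In short: find_backend parses the "if not is_xxx_available():"
        # guards from the library __init__ and joins multiple backends with
        # "_and_".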
def _lowerCamelCase ( self : int) -> Dict:
"""simple docstring"""
_UpperCAmelCase = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('torch' , A)
self.assertIn('torch_and_transformers' , A)
self.assertIn('flax_and_transformers' , A)
self.assertIn('torch_and_transformers_and_onnx' , A)
# Likewise, we can't assert on the exact content of a key
self.assertIn('UNet2DModel' , objects['torch'])
self.assertIn('FlaxUNet2DConditionModel' , objects['flax'])
self.assertIn('StableDiffusionPipeline' , objects['torch_and_transformers'])
self.assertIn('FlaxStableDiffusionPipeline' , objects['flax_and_transformers'])
self.assertIn('LMSDiscreteScheduler' , objects['torch_and_scipy'])
self.assertIn('OnnxStableDiffusionPipeline' , objects['torch_and_transformers_and_onnx'])
def _lowerCamelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = create_dummy_object('CONSTANT' , '\'torch\'')
self.assertEqual(A , '\nCONSTANT = None\n')
_UpperCAmelCase = create_dummy_object('function' , '\'torch\'')
self.assertEqual(
A , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n')
_UpperCAmelCase = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n'
_UpperCAmelCase = create_dummy_object('FakeClass' , '\'torch\'')
self.assertEqual(A , A)
def _lowerCamelCase ( self : Dict) -> int:
"""simple docstring"""
_UpperCAmelCase = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n'
_UpperCAmelCase = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']})
self.assertEqual(dummy_files['torch'] , A)
| 339 | 0 |
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
__magic_name__ = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
__magic_name__ = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
__magic_name__ = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... 
[\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE_ ( datasets.Metric ):
"""simple docstring"""
def snake_case_ ( self):
if version.parse(scb.__version__) < version.parse("""1.4.12"""):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""")
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""http://www.cs.umd.edu/~snover/tercom/""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence"""),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""") , id="""references"""),
}) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#ter"""] , reference_urls=[
"""https://github.com/jhclark/tercom""",
] , )
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = False , ):
__SCREAMING_SNAKE_CASE = len(references[0])
if any(len(lowerCAmelCase__) != references_per_prediction for refs in references):
raise ValueError("""Sacrebleu requires the same number of references for each prediction""")
__SCREAMING_SNAKE_CASE = [[refs[i] for refs in references] for i in range(lowerCAmelCase__)]
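        # references arrive as one list of candidate refs per prediction;
        # sacrebleu expects one stream per reference index, hence the
        # transposition above.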
__SCREAMING_SNAKE_CASE = TER(
normalized=lowerCAmelCase__ , no_punct=lowerCAmelCase__ , asian_support=lowerCAmelCase__ , case_sensitive=lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE = sb_ter.corpus_score(lowerCAmelCase__ , lowerCAmelCase__)
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 100 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
UpperCAmelCase__ = logging.getLogger(__name__)
@dataclass
class __lowerCAmelCase :
UpperCamelCase = field(
default='''tab_fact''' , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
UpperCamelCase = field(
default='''tab_fact''' , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} , )
UpperCamelCase = field(
default=1_0_2_4 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of prediction examples to this '''
'''value if set.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''A csv or a json file containing the training data.'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''A csv or a json file containing the validation data.'''} )
UpperCamelCase = field(default=A , metadata={'''help''': '''A csv or a json file containing the test data.'''} )
def _lowerCamelCase ( self : str) -> List[Any]:
"""simple docstring"""
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.')
else:
_UpperCAmelCase = self.train_file.split('.')[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
_UpperCAmelCase = self.validation_file.split('.')[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class __lowerCAmelCase :
UpperCamelCase = field(
default=A , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
UpperCamelCase = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
def A ( ) -> Optional[int]:
'''simple docstring'''
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
_UpperCAmelCase = training_args.get_process_log_level()
logger.setLevel(_UpperCAmelCase )
datasets.utils.logging.set_verbosity(_UpperCAmelCase )
transformers.utils.logging.set_verbosity(_UpperCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
_UpperCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
_UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
_UpperCAmelCase = {'train': data_args.train_file, 'validation': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
_UpperCAmelCase = data_args.train_file.split('.' )[-1]
_UpperCAmelCase = data_args.test_file.split('.' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
_UpperCAmelCase = data_args.test_file
else:
raise ValueError('Need either a GLUE task or a test file for `do_predict`.' )
for key in data_files.keys():
logger.info(F"load a local file for {key}: {data_files[key]}" )
if data_args.train_file.endswith('.csv' ):
# Loading a dataset from local csv files
_UpperCAmelCase = load_dataset('csv' , data_files=_UpperCAmelCase , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
_UpperCAmelCase = load_dataset('json' , data_files=_UpperCAmelCase , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
_UpperCAmelCase = raw_datasets['train'].features['label'].names
_UpperCAmelCase = len(_UpperCAmelCase )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
_UpperCAmelCase = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=_UpperCAmelCase , )
_UpperCAmelCase = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
_UpperCAmelCase = 'max_length'
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
_UpperCAmelCase = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
_UpperCAmelCase = {'Refused': 0, 'Entailed': 1}
_UpperCAmelCase = {0: 'Refused', 1: 'Entailed'}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
_UpperCAmelCase = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(_UpperCAmelCase : Union[str, Any] ):
# Tokenize the texts
def _convert_table_text_to_pandas(_UpperCAmelCase : Dict ):
_UpperCAmelCase = [_table_row.split('#' ) for _table_row in _table_text.strip('\n' ).split('\n' )]
_UpperCAmelCase = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
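        # tab_fact serialises each table as '#'-delimited rows, e.g.
        # "col1#col2\na#b" parses into a one-row, two-column DataFrame above.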
_UpperCAmelCase = examples['statement']
_UpperCAmelCase = list(map(_convert_table_text_to_pandas , examples['table_text'] ) )
_UpperCAmelCase = tokenizer(_UpperCAmelCase , _UpperCAmelCase , padding=_UpperCAmelCase , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase )
_UpperCAmelCase = examples['label']
return result
with training_args.main_process_first(desc='dataset map pre-processing' ):
_UpperCAmelCase = raw_datasets.map(
_UpperCAmelCase , batched=_UpperCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on dataset' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
_UpperCAmelCase = raw_datasets['train']
if data_args.max_train_samples is not None:
_UpperCAmelCase = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
_UpperCAmelCase = raw_datasets['validation']
if data_args.max_eval_samples is not None:
_UpperCAmelCase = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('--do_predict requires a test dataset' )
_UpperCAmelCase = raw_datasets['test']
if data_args.max_predict_samples is not None:
_UpperCAmelCase = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(_UpperCAmelCase ) ) , 3 ):
logger.info(F"Sample {index} of the training set: {train_dataset[index]}." )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(_UpperCAmelCase : EvalPrediction ):
_UpperCAmelCase = p.predictions[0] if isinstance(p.predictions , _UpperCAmelCase ) else p.predictions
_UpperCAmelCase = np.argmax(_UpperCAmelCase , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
_UpperCAmelCase = default_data_collator
elif training_args.fpaa:
_UpperCAmelCase = DataCollatorWithPadding(_UpperCAmelCase , pad_to_multiple_of=8 )
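        # pad_to_multiple_of=8 keeps the padded sequence lengths Tensor Core
        # friendly when training in fp16.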
else:
_UpperCAmelCase = None
# Initialize our Trainer
_UpperCAmelCase = Trainer(
model=_UpperCAmelCase , args=_UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=_UpperCAmelCase , tokenizer=_UpperCAmelCase , data_collator=_UpperCAmelCase , )
# Training
if training_args.do_train:
_UpperCAmelCase = None
if training_args.resume_from_checkpoint is not None:
_UpperCAmelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_UpperCAmelCase = last_checkpoint
_UpperCAmelCase = trainer.train(resume_from_checkpoint=_UpperCAmelCase )
_UpperCAmelCase = train_result.metrics
_UpperCAmelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_UpperCAmelCase )
)
_UpperCAmelCase = min(_UpperCAmelCase , len(_UpperCAmelCase ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('train' , _UpperCAmelCase )
trainer.save_metrics('train' , _UpperCAmelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_UpperCAmelCase = trainer.evaluate(eval_dataset=_UpperCAmelCase )
_UpperCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_UpperCAmelCase )
_UpperCAmelCase = min(_UpperCAmelCase , len(_UpperCAmelCase ) )
trainer.log_metrics('eval' , _UpperCAmelCase )
trainer.save_metrics('eval' , _UpperCAmelCase )
if training_args.do_predict:
logger.info('*** Predict ***' )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
_UpperCAmelCase = predict_dataset.remove_columns('label' )
_UpperCAmelCase = trainer.predict(_UpperCAmelCase , metric_key_prefix='predict' ).predictions
_UpperCAmelCase = np.argmax(_UpperCAmelCase , axis=1 )
_UpperCAmelCase = os.path.join(training_args.output_dir , 'predict_results_tabfact.txt' )
if trainer.is_world_process_zero():
with open(_UpperCAmelCase , 'w' ) as writer:
logger.info('***** Predict Results *****' )
writer.write('index\tprediction\n' )
for index, item in enumerate(_UpperCAmelCase ):
_UpperCAmelCase = label_list[item]
writer.write(F"{index}\t{item}\n" )
_UpperCAmelCase = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'}
if training_args.push_to_hub:
trainer.push_to_hub(**_UpperCAmelCase )
else:
trainer.create_model_card(**_UpperCAmelCase )
def A ( _UpperCAmelCase : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 339 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ :Optional[int] = logging.get_logger(__name__)
lowercase__ :Union[str, Any] = torch.device("cpu")
def UpperCamelCase ( ):
'''simple docstring'''
lowercase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowercase = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw )
return im
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703E00, 2.1107E00, -2.0811E00, 8.8685E-01, 2.4360E-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636E-01, 2.3478E-01, -1.6963E00, -1.7381E00, -8.6337E-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768E-01, -4.7429E-01, -1.0897E00, -1.0248E00, 3.5523E-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330E-01, 2.4211E-01, -6.0185E-01, -8.2789E-01, -6.0446E-02] )
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowercase = dct.pop(lowerCAmelCase__ )
lowercase = val
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase = []
for k in state_dict.keys():
lowercase = k
if ".pwconv" in k:
lowercase = k_new.replace('''.pwconv''' , '''.point_wise_conv''' )
if ".dwconv" in k:
lowercase = k_new.replace('''.dwconv''' , '''.depth_wise_conv''' )
if ".Proj." in k:
lowercase = k_new.replace('''.Proj.''' , '''.proj.''' )
if "patch_embed" in k_new:
lowercase = k_new.replace('''patch_embed''' , '''swiftformer.patch_embed.patch_embedding''' )
if "network" in k_new:
lowercase = k_new.split('''.''' )
if ls[2].isdigit():
lowercase = '''swiftformer.encoder.network.''' + ls[1] + '''.blocks.''' + ls[2] + '''.''' + '''.'''.join(ls[3:] )
else:
lowercase = k_new.replace('''network''' , '''swiftformer.encoder.network''' )
rename_keys.append((k, k_new) )
return rename_keys
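# Hedged illustration of the renaming pattern above: `create_rename_keys` emits
# (old, new) pairs and `rename_key` pops each old entry and re-inserts it under
# the new name, rewriting the checkpoint in place. The key below is made up:
#
#   sd = {"network.0.pwconv.weight": 0}
#   rename_key(sd, "network.0.pwconv.weight",
#              "swiftformer.encoder.network.0.point_wise_conv.weight")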
@torch.no_grad()
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowercase = SwiftFormerConfig()
    # label mapping: every SwiftFormer checkpoint converted here is fine-tuned on ImageNet-1k
lowercase = 1000
lowercase = '''huggingface/label-files'''
lowercase = '''imagenet-1k-id2label.json'''
lowercase = json.load(open(hf_hub_download(lowerCAmelCase__ , lowerCAmelCase__ , repo_type='''dataset''' ) , '''r''' ) )
lowercase = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()}
lowercase = idalabel
lowercase = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
lowercase = [3, 3, 6, 4]
lowercase = [48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
lowercase = [3, 3, 9, 6]
lowercase = [48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
lowercase = [4, 3, 10, 5]
lowercase = [48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
lowercase = [4, 4, 12, 6]
lowercase = [64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith('''https''' ):
lowercase = torch.hub.load_state_dict_from_url(lowerCAmelCase__ , map_location='''cpu''' , check_hash=lowerCAmelCase__ )
else:
lowercase = torch.load(lowerCAmelCase__ , map_location='''cpu''' )
lowercase = checkpoint
lowercase = create_rename_keys(lowerCAmelCase__ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# load HuggingFace model
lowercase = SwiftFormerForImageClassification(lowerCAmelCase__ ).eval()
hf_model.load_state_dict(lowerCAmelCase__ )
# prepare test inputs
lowercase = prepare_img()
lowercase = ViTImageProcessor.from_pretrained('''preprocessor_config''' )
lowercase = processor(images=lowerCAmelCase__ , return_tensors='''pt''' )
# compare outputs from both models
lowercase = get_expected_output(lowerCAmelCase__ )
lowercase = hf_model(inputs['''pixel_values'''] ).logits
assert hf_logits.shape == torch.Size([1, 1000] )
assert torch.allclose(hf_logits[0, 0:5] , lowerCAmelCase__ , atol=1E-3 )
Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ )
print(f'Saving model {swiftformer_name} to {pytorch_dump_folder_path}' )
hf_model.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
lowercase__ :Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swiftformer_name",
default="swiftformer_xs",
choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
type=str,
help="Name of the SwiftFormer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="./converted_outputs/",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")
lowercase__ :Dict = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 101 |
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def A ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict ) -> Any:
'''simple docstring'''
_UpperCAmelCase = multiprocessing.Manager()
_UpperCAmelCase = manager.list()
_UpperCAmelCase = multiprocessing.Process(target=_UpperCAmelCase , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append('timed out' )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
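# Minimal, self-contained sketch of the guard pattern implemented above: run a
# callable in a worker process, wait at most `timeout` seconds, then kill it so
# a runaway program cannot hang the evaluator. Names here are illustrative.
def _example_run_with_timeout(target, timeout: float) -> bool:
    import multiprocessing

    worker = multiprocessing.Process(target=target)
    worker.start()
    worker.join(timeout=timeout)
    if worker.is_alive():
        worker.kill()  # force-stop a target that outlived its budget
        return False
    return True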
def A ( _UpperCAmelCase : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict ) -> Optional[int]:
'''simple docstring'''
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
_UpperCAmelCase = shutil.rmtree
_UpperCAmelCase = os.rmdir
_UpperCAmelCase = os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
_UpperCAmelCase = {}
with swallow_io():
with time_limit(_UpperCAmelCase ):
exec(_UpperCAmelCase , _UpperCAmelCase )
result.append('passed' )
except TimeoutException:
result.append('timed out' )
except BaseException as e:
result.append(F"failed: {e}" )
# Needed for cleaning up.
_UpperCAmelCase = rmtree
_UpperCAmelCase = rmdir
_UpperCAmelCase = chdir
@contextlib.contextmanager
def A ( _UpperCAmelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
def signal_handler(_UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict ):
raise TimeoutException('Timed out!' )
signal.setitimer(signal.ITIMER_REAL , _UpperCAmelCase )
signal.signal(signal.SIGALRM , _UpperCAmelCase )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
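# Hypothetical usage of the SIGALRM-based guard above (`time_limit` in the
# upstream human-eval source). POSIX-only, and the 0.2 s budget is made up:
#
#   try:
#       with time_limit(0.2):
#           while True:
#               pass
#   except TimeoutException:
#       print("guarded block timed out")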
@contextlib.contextmanager
def A ( ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = WriteOnlyStringIO()
with contextlib.redirect_stdout(_UpperCAmelCase ):
with contextlib.redirect_stderr(_UpperCAmelCase ):
with redirect_stdin(_UpperCAmelCase ):
yield
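# Hedged note: the context manager above stacks three redirections. stdout and
# stderr land in a write-only buffer, and stdin is swapped for that same
# unreadable stream, so untrusted code can neither print nor block on input().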
@contextlib.contextmanager
def A ( ) -> Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as dirname:
with chdir(_UpperCAmelCase ):
yield dirname
class __lowerCAmelCase ( A ):
pass
class __lowerCAmelCase ( io.StringIO ):
def _lowerCamelCase ( self : Tuple , *A : str , **A : Any) -> Any:
"""simple docstring"""
raise OSError
def _lowerCamelCase ( self : List[str] , *A : Optional[Any] , **A : Optional[Any]) -> Optional[int]:
"""simple docstring"""
raise OSError
def _lowerCamelCase ( self : str , *A : List[str] , **A : List[Any]) -> Union[str, Any]:
"""simple docstring"""
raise OSError
def _lowerCamelCase ( self : Union[str, Any] , *A : Optional[Any] , **A : List[str]) -> Optional[int]:
"""simple docstring"""
return False
class __lowerCAmelCase ( contextlib._RedirectStream ): # type: ignore
UpperCamelCase = '''stdin'''
@contextlib.contextmanager
def A ( _UpperCAmelCase : List[Any] ) -> Dict:
'''simple docstring'''
if root == ".":
yield
return
_UpperCAmelCase = os.getcwd()
os.chdir(_UpperCAmelCase )
try:
yield
except BaseException as exc:
raise exc
finally:
os.chdir(_UpperCAmelCase )
def A ( _UpperCAmelCase : List[str]=None ) -> Any:
'''simple docstring'''
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
_UpperCAmelCase = None
_UpperCAmelCase = None
import os
_UpperCAmelCase = '1'
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
import shutil
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
import subprocess
_UpperCAmelCase = None # type: ignore
_UpperCAmelCase = None
import sys
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
| 339 | 0 |
"""simple docstring"""
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def lowercase ( _snake_case : Union[str, Any] , _snake_case : Dict , _snake_case : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
__snake_case : Optional[Any] = AlbertConfig.from_json_file(_snake_case )
print(f"""Building PyTorch model from configuration: {config}""" )
__snake_case : Tuple = AlbertForPreTraining(_snake_case )
# Load weights from tf checkpoint
load_tf_weights_in_albert(_snake_case , _snake_case , _snake_case )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , _snake_case )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--albert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained ALBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 102 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def A ( _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any]=False ) -> str:
'''simple docstring'''
try:
_UpperCAmelCase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_UpperCAmelCase = default
else:
# KEY is set, convert it to True or False.
try:
_UpperCAmelCase = strtobool(_UpperCAmelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"If set, {key} must be yes or no." )
return _value
UpperCAmelCase__ = parse_flag_from_env("RUN_SLOW", default=False)
def A ( _UpperCAmelCase : List[str] ) -> List[str]:
'''simple docstring'''
return unittest.skip('Test was skipped' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Dict ) -> str:
'''simple docstring'''
return unittest.skipUnless(_run_slow_tests , 'test is slow' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Any ) -> str:
'''simple docstring'''
return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Dict ) -> Dict:
'''simple docstring'''
return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Optional[Any] ) -> List[Any]:
'''simple docstring'''
return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Optional[int] ) -> List[str]:
'''simple docstring'''
return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : str ) -> str:
'''simple docstring'''
return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Optional[Any] ) -> str:
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Tuple ) -> int:
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Any ) -> Optional[int]:
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Tuple ) -> Any:
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Any ) -> Optional[int]:
'''simple docstring'''
return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : List[Any] ) -> Dict:
'''simple docstring'''
return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Optional[int] ) -> str:
'''simple docstring'''
return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Any=None , _UpperCAmelCase : List[Any]=None ) -> Dict:
'''simple docstring'''
if test_case is None:
return partial(_UpperCAmelCase , version=_UpperCAmelCase )
return unittest.skipUnless(is_torch_version('>=' , _UpperCAmelCase ) , F"test requires torch version >= {version}" )(_UpperCAmelCase )
def A ( _UpperCAmelCase : List[str] ) -> int:
'''simple docstring'''
return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : List[str] ) -> Optional[int]:
'''simple docstring'''
return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(_UpperCAmelCase )
UpperCAmelCase__ = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def A ( _UpperCAmelCase : List[str] ) -> Any:
'''simple docstring'''
return unittest.skipUnless(
_atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(_UpperCAmelCase )
class __lowerCAmelCase ( unittest.TestCase ):
UpperCamelCase = True
@classmethod
def _lowerCamelCase ( cls : List[Any]) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = tempfile.mkdtemp()
@classmethod
def _lowerCamelCase ( cls : Union[str, Any]) -> str:
"""simple docstring"""
if os.path.exists(cls.tmpdir):
shutil.rmtree(cls.tmpdir)
def _lowerCamelCase ( self : List[str]) -> List[Any]:
"""simple docstring"""
if self.clear_on_setup:
for path in Path(self.tmpdir).glob('**/*'):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(A)
class __lowerCAmelCase ( unittest.TestCase ):
def _lowerCamelCase ( self : Dict) -> Tuple:
"""simple docstring"""
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class __lowerCAmelCase ( unittest.TestCase ):
def _lowerCamelCase ( self : Optional[int] , A : Union[mock.Mock, List[mock.Mock]]) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = mocks if isinstance(A , (tuple, list)) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop)
def A ( _UpperCAmelCase : List[Any] ) -> int:
'''simple docstring'''
_UpperCAmelCase = AcceleratorState()
_UpperCAmelCase = tensor[None].clone().to(state.device )
_UpperCAmelCase = gather(_UpperCAmelCase ).cpu()
_UpperCAmelCase = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , _UpperCAmelCase ):
return False
return True
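# Hedged note on the helper above: each process contributes its tensor via
# gather(), every gathered copy is compared against the first one, and the
# check fails on the first mismatch. A cheap cross-process consistency test.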
class __lowerCAmelCase :
def __init__( self : Optional[Any] , A : Union[str, Any] , A : Optional[int] , A : str) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = returncode
_UpperCAmelCase = stdout
_UpperCAmelCase = stderr
async def A ( _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
while True:
_UpperCAmelCase = await stream.readline()
if line:
callback(_UpperCAmelCase )
else:
break
async def A ( _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : str=None , _UpperCAmelCase : str=None , _UpperCAmelCase : Dict=False , _UpperCAmelCase : Union[str, Any]=False ) -> _RunOutput:
'''simple docstring'''
if echo:
print('\nRunning: ' , ' '.join(_UpperCAmelCase ) )
_UpperCAmelCase = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=_UpperCAmelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_UpperCAmelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
_UpperCAmelCase = []
_UpperCAmelCase = []
def tee(_UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : str="" ):
_UpperCAmelCase = line.decode('utf-8' ).rstrip()
sink.append(_UpperCAmelCase )
if not quiet:
print(_UpperCAmelCase , _UpperCAmelCase , file=_UpperCAmelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda _UpperCAmelCase : tee(_UpperCAmelCase , _UpperCAmelCase , sys.stdout , label='stdout:' ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda _UpperCAmelCase : tee(_UpperCAmelCase , _UpperCAmelCase , sys.stderr , label='stderr:' ) ) ),
] , timeout=_UpperCAmelCase , )
return _RunOutput(await p.wait() , _UpperCAmelCase , _UpperCAmelCase )
def A ( _UpperCAmelCase : str , _UpperCAmelCase : Dict=None , _UpperCAmelCase : str=None , _UpperCAmelCase : str=180 , _UpperCAmelCase : List[Any]=False , _UpperCAmelCase : List[Any]=True ) -> _RunOutput:
'''simple docstring'''
_UpperCAmelCase = asyncio.get_event_loop()
_UpperCAmelCase = loop.run_until_complete(
_stream_subprocess(_UpperCAmelCase , env=_UpperCAmelCase , stdin=_UpperCAmelCase , timeout=_UpperCAmelCase , quiet=_UpperCAmelCase , echo=_UpperCAmelCase ) )
_UpperCAmelCase = ' '.join(_UpperCAmelCase )
if result.returncode > 0:
_UpperCAmelCase = '\n'.join(result.stderr )
raise RuntimeError(
F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
F"The combined stderr from workers follows:\n{stderr}" )
return result
class __lowerCAmelCase ( A ):
pass
def A ( _UpperCAmelCase : List[str] , _UpperCAmelCase : str=False ) -> Tuple:
'''simple docstring'''
try:
_UpperCAmelCase = subprocess.check_output(_UpperCAmelCase , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(_UpperCAmelCase , 'decode' ):
_UpperCAmelCase = output.decode('utf-8' )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
F"Command `{' '.join(_UpperCAmelCase )}` failed with the following error:\n\n{e.output.decode()}" ) from e
| 339 | 0 |
from datetime import datetime as dt
import os
from github import Github
A__ : List[str] = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def UpperCamelCase( ):
lowerCAmelCase_ : Union[str, Any] = Github(os.environ['''GITHUB_TOKEN'''] )
lowerCAmelCase_ : Tuple = g.get_repo('''huggingface/transformers''' )
lowerCAmelCase_ : int = repo.get_issues(state='''open''' )
for issue in open_issues:
        lowerCAmelCase_ : Optional[Any] = sorted([comment for comment in issue.get_comments()] ,key=lambda i : i.created_at ,reverse=True )
lowerCAmelCase_ : Tuple = comments[0] if len(__UpperCamelCase ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
| 103 |
from __future__ import annotations
UpperCAmelCase__ = list[list[int]]
# assigning initial values to the grid
UpperCAmelCase__ = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
UpperCAmelCase__ = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def A ( _UpperCAmelCase : Matrix , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> bool:
'''simple docstring'''
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def A ( _UpperCAmelCase : Matrix ) -> tuple[int, int] | None:
'''simple docstring'''
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def A ( _UpperCAmelCase : Matrix ) -> Matrix | None:
'''simple docstring'''
if location := find_empty_location(_UpperCAmelCase ):
_UpperCAmelCase , _UpperCAmelCase = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
_UpperCAmelCase = digit
if sudoku(_UpperCAmelCase ) is not None:
return grid
_UpperCAmelCase = 0
return None
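# The solver above is plain depth-first backtracking: `find_empty_location`
# picks the next blank cell, every digit 1-9 that passes `is_safe` is tried in
# turn, and a failed branch resets the cell to 0 before moving on. The grid is
# solved exactly when no empty cell remains.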
def A ( _UpperCAmelCase : Matrix ) -> None:
'''simple docstring'''
for row in grid:
for cell in row:
print(_UpperCAmelCase , end=' ' )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
UpperCAmelCase__ = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
| 339 | 0 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=lowerCamelCase__ )
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = field(default='automatic-speech-recognition' , metadata={'include_in_asdict_even_if_is_default': True} )
SCREAMING_SNAKE_CASE : ClassVar[Features] = Features({'audio': Audio()} )
SCREAMING_SNAKE_CASE : ClassVar[Features] = Features({'transcription': Value('string' )} )
SCREAMING_SNAKE_CASE : str = "audio"
SCREAMING_SNAKE_CASE : str = "transcription"
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : Optional[Any] ):
if self.audio_column not in features:
raise ValueError(F"Column {self.audio_column} is not present in features." )
if not isinstance(features[self.audio_column] ,lowercase__ ):
raise ValueError(F"Column {self.audio_column} is not an Audio type." )
__lowercase = copy.deepcopy(self )
__lowercase = self.input_schema.copy()
__lowercase = features[self.audio_column]
__lowercase = input_schema
return task_template
@property
def SCREAMING_SNAKE_CASE ( self : List[str] ):
return {self.audio_column: "audio", self.transcription_column: "transcription"}
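# Hedged usage sketch (the column names are illustrative; the upstream
# `datasets` library exposes this template as `AutomaticSpeechRecognition`):
# casting a dataset to the template renames its columns via the mapping
# returned above, and the audio column's type is validated by the alignment
# method.
#
#   template = AutomaticSpeechRecognition(audio_column="file", transcription_column="text")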
| 104 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
UpperCAmelCase__ = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
UpperCAmelCase__ = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
UpperCAmelCase__ = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
UpperCAmelCase__ = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
def _lowerCamelCase ( self : List[Any]) -> List[Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Value('string' , id='sequence'),
}) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[
'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
'https://en.wikipedia.org/wiki/METEOR',
] , )
def _lowerCamelCase ( self : Optional[Any] , A : List[str]) -> List[Any]:
"""simple docstring"""
import nltk
nltk.download('wordnet')
if NLTK_VERSION >= version.Version('3.6.5'):
nltk.download('punkt')
if NLTK_VERSION >= version.Version('3.6.6'):
nltk.download('omw-1.4')
def _lowerCamelCase ( self : Optional[Any] , A : Tuple , A : Optional[int] , A : List[Any]=0.9 , A : Optional[Any]=3 , A : Optional[int]=0.5) -> Any:
"""simple docstring"""
if NLTK_VERSION >= version.Version('3.6.5'):
_UpperCAmelCase = [
meteor_score.single_meteor_score(
word_tokenize(A) , word_tokenize(A) , alpha=A , beta=A , gamma=A)
for ref, pred in zip(A , A)
]
else:
_UpperCAmelCase = [
meteor_score.single_meteor_score(A , A , alpha=A , beta=A , gamma=A)
for ref, pred in zip(A , A)
]
return {"meteor": np.mean(A)}
| 339 | 0 |
"""simple docstring"""
from __future__ import annotations
def _SCREAMING_SNAKE_CASE ( _lowercase : list[int] , _lowercase : list[int] , _lowercase : int ) ->tuple[float, list[float]]:
'''simple docstring'''
a : Tuple = list(range(len(_lowercase ) ) )
a : Tuple = [v / w for v, w in zip(_lowercase , _lowercase )]
    index.sort(key=lambda i : ratio[i] , reverse=True )
a : float = 0
a : list[float] = [0] * len(_lowercase )
for i in index:
if weight[i] <= capacity:
a : List[str] = 1
max_value += value[i]
capacity -= weight[i]
else:
a : Optional[int] = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
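# Worked example (the numbers are illustrative, not from the source): with
# value = [60, 100, 120], weight = [10, 20, 30] and capacity = 50, the
# value/weight ratios are 6, 5 and 4, so items 0 and 1 are taken whole and
# 2/3 of item 2 fills the rest:
#
#   (240.0, [1, 1, 0.6666666666666666])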
if __name__ == "__main__":
import doctest
doctest.testmod()
| 105 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
UpperCAmelCase__ = {
"tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
"tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
"base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
"base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
"small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
"small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
"medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
"medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
"large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
"large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}
def A ( _UpperCAmelCase : Optional[int] ) -> str:
'''simple docstring'''
_UpperCAmelCase = ['layers', 'blocks']
for k in ignore_keys:
state_dict.pop(_UpperCAmelCase , _UpperCAmelCase )
UpperCAmelCase__ = {
"blocks": "layers",
"mlp.0": "fc1",
"mlp.2": "fc2",
"mlp_ln": "final_layer_norm",
".attn.query": ".self_attn.q_proj",
".attn.key": ".self_attn.k_proj",
".attn.value": ".self_attn.v_proj",
".attn_ln": ".self_attn_layer_norm",
".attn.out": ".self_attn.out_proj",
".cross_attn.query": ".encoder_attn.q_proj",
".cross_attn.key": ".encoder_attn.k_proj",
".cross_attn.value": ".encoder_attn.v_proj",
".cross_attn_ln": ".encoder_attn_layer_norm",
".cross_attn.out": ".encoder_attn.out_proj",
"decoder.ln.": "decoder.layer_norm.",
"encoder.ln.": "encoder.layer_norm.",
"token_embedding": "embed_tokens",
"encoder.positional_embedding": "encoder.embed_positions.weight",
"decoder.positional_embedding": "decoder.embed_positions.weight",
"ln_post": "layer_norm",
}
def A ( _UpperCAmelCase : Dict ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = list(s_dict.keys() )
for key in keys:
_UpperCAmelCase = key
for k, v in WHISPER_MAPPING.items():
if k in key:
_UpperCAmelCase = new_key.replace(_UpperCAmelCase , _UpperCAmelCase )
print(F"{key} -> {new_key}" )
_UpperCAmelCase = s_dict.pop(_UpperCAmelCase )
return s_dict
def A ( _UpperCAmelCase : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase = emb.weight.shape
_UpperCAmelCase = nn.Linear(_UpperCAmelCase , _UpperCAmelCase , bias=_UpperCAmelCase )
_UpperCAmelCase = emb.weight.data
return lin_layer
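# What the helper above achieves (hedged summary): the decoder's token
# embedding matrix is reused as the weight of a bias-free nn.Linear, so the
# output projection is tied to the input embeddings and shares one tensor
# instead of duplicating vocab_size x d_model parameters.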
def A ( _UpperCAmelCase : str , _UpperCAmelCase : str ) -> bytes:
'''simple docstring'''
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
_UpperCAmelCase = os.path.basename(_UpperCAmelCase )
_UpperCAmelCase = url.split('/' )[-2]
_UpperCAmelCase = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
if os.path.exists(_UpperCAmelCase ) and not os.path.isfile(_UpperCAmelCase ):
raise RuntimeError(F"{download_target} exists and is not a regular file" )
if os.path.isfile(_UpperCAmelCase ):
_UpperCAmelCase = open(_UpperCAmelCase , 'rb' ).read()
        if hashlib.sha256(_UpperCAmelCase ).hexdigest() == expected_sha256:
return model_bytes
else:
warnings.warn(F"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file" )
with urllib.request.urlopen(_UpperCAmelCase ) as source, open(_UpperCAmelCase , 'wb' ) as output:
with tqdm(
total=int(source.info().get('Content-Length' ) ) , ncols=80 , unit='iB' , unit_scale=_UpperCAmelCase , unit_divisor=1_024 ) as loop:
while True:
_UpperCAmelCase = source.read(8_192 )
if not buffer:
break
output.write(_UpperCAmelCase )
loop.update(len(_UpperCAmelCase ) )
_UpperCAmelCase = open(_UpperCAmelCase , 'rb' ).read()
    if hashlib.sha256(_UpperCAmelCase ).hexdigest() != expected_sha256:
        raise RuntimeError(
            'Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.' )
return model_bytes
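# Hedged note on the integrity check above: every _MODELS URL embeds the
# expected SHA-256 digest as its second-to-last path segment (split off near
# the top of the function), and the downloaded bytes are re-hashed against it:
#
#   import hashlib
#   assert hashlib.sha256(model_bytes).hexdigest() == expected_sha256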
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any ) -> Optional[int]:
'''simple docstring'''
if ".pt" not in checkpoint_path:
_UpperCAmelCase = _download(_MODELS[checkpoint_path] )
else:
_UpperCAmelCase = torch.load(_UpperCAmelCase , map_location='cpu' )
_UpperCAmelCase = original_checkpoint['dims']
_UpperCAmelCase = original_checkpoint['model_state_dict']
_UpperCAmelCase = state_dict['decoder.token_embedding.weight']
remove_ignore_keys_(_UpperCAmelCase )
rename_keys(_UpperCAmelCase )
_UpperCAmelCase = True
_UpperCAmelCase = state_dict['decoder.layers.0.fc1.weight'].shape[0]
_UpperCAmelCase = WhisperConfig(
        vocab_size=dimensions['n_vocab'] , encoder_ffn_dim=_UpperCAmelCase , decoder_ffn_dim=_UpperCAmelCase , num_mel_bins=dimensions['n_mels'] , d_model=dimensions['n_audio_state'] , max_target_positions=dimensions['n_text_ctx'] , encoder_layers=dimensions['n_audio_layer'] , encoder_attention_heads=dimensions['n_audio_head'] , decoder_layers=dimensions['n_text_layer'] , decoder_attention_heads=dimensions['n_text_head'] , max_source_positions=dimensions['n_audio_ctx'] , )
_UpperCAmelCase = WhisperForConditionalGeneration(_UpperCAmelCase )
_UpperCAmelCase , _UpperCAmelCase = model.model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase )
if len(_UpperCAmelCase ) > 0 and not set(_UpperCAmelCase ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
F" but all the following weights are missing {missing}" )
if tie_embeds:
_UpperCAmelCase = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
_UpperCAmelCase = proj_out_weights
model.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# # Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Patht to the downloaded checkpoints")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
UpperCAmelCase__ = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 339 | 0 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
__UpperCamelCase : List[str] = 8
def __SCREAMING_SNAKE_CASE ( A_ , A_=BITS ):
lowerCAmelCase__ : Any = x.device
lowerCAmelCase__ : Optional[int] = (x * 2_55).int().clamp(0 , 2_55 )
lowerCAmelCase__ : str = 2 ** torch.arange(bits - 1 , -1 , -1 , device=A_ )
lowerCAmelCase__ : List[Any] = rearrange(A_ , '''d -> d 1 1''' )
lowerCAmelCase__ : Any = rearrange(A_ , '''b c h w -> b c 1 h w''' )
lowerCAmelCase__ : Dict = ((x & mask) != 0).float()
lowerCAmelCase__ : Any = rearrange(A_ , '''b c d h w -> b (c d) h w''' )
lowerCAmelCase__ : Union[str, Any] = bits * 2 - 1
return bits
def __SCREAMING_SNAKE_CASE ( A_ , A_=BITS ):
lowerCAmelCase__ : str = x.device
lowerCAmelCase__ : Dict = (x > 0).int()
    lowerCAmelCase__ : Tuple = 2 ** torch.arange(bits - 1 , -1 , -1 , device=A_ , dtype=torch.int32 )
lowerCAmelCase__ : Dict = rearrange(A_ , '''d -> d 1 1''' )
lowerCAmelCase__ : List[str] = rearrange(A_ , '''b (c d) h w -> b c d h w''' , d=8 )
lowerCAmelCase__ : Union[str, Any] = reduce(x * mask , '''b c d h w -> b c h w''' , '''sum''' )
return (dec / 2_55).clamp(0.0 , 1.0 )
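# Hedged round-trip sketch for the two helpers above: pixels in [0, 1] are
# quantised to 8 bits per channel, spread into {-1, +1} bit-planes for the
# diffusion model, and recovered up to the 8-bit quantisation on the way back.
#
#   x = torch.rand(1, 3, 8, 8)
#   bits = decimal_to_bits(x)      # shape (1, 24, 8, 8), values in {-1, +1}
#   x_rec = bits_to_decimal(bits)  # equals x up to 8-bit quantisation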
def __SCREAMING_SNAKE_CASE ( self , A_ , A_ , A_ , A_ = 0.0 , A_ = True , A_=None , A_ = True , ):
if self.num_inference_steps is None:
raise ValueError(
'''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
# Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>)
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
lowerCAmelCase__ : int = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
lowerCAmelCase__ : Tuple = self.alphas_cumprod[timestep]
lowerCAmelCase__ : Dict = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
lowerCAmelCase__ : Any = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowerCAmelCase__ : Union[str, Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
lowerCAmelCase__ : str = self.bit_scale
if self.config.clip_sample:
lowerCAmelCase__ : Dict = torch.clamp(A_ , -scale , A_ )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
lowerCAmelCase__ : str = self._get_variance(A_ , A_ )
lowerCAmelCase__ : List[Any] = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
lowerCAmelCase__ : int = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowerCAmelCase__ : int = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowerCAmelCase__ : Optional[Any] = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
lowerCAmelCase__ : List[str] = model_output.device if torch.is_tensor(A_ ) else '''cpu'''
lowerCAmelCase__ : Any = torch.randn(model_output.shape , dtype=model_output.dtype , generator=A_ ).to(A_ )
lowerCAmelCase__ : Dict = self._get_variance(A_ , A_ ) ** 0.5 * eta * noise
lowerCAmelCase__ : Optional[int] = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=A_ , pred_original_sample=A_ )
def __SCREAMING_SNAKE_CASE ( self , A_ , A_ , A_ , A_="epsilon" , A_=None , A_ = True , ):
lowerCAmelCase__ : Union[str, Any] = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
lowerCAmelCase__ ,lowerCAmelCase__ : int = torch.split(A_ , sample.shape[1] , dim=1 )
else:
lowerCAmelCase__ : List[Any] = None
# 1. compute alphas, betas
lowerCAmelCase__ : List[str] = self.alphas_cumprod[t]
lowerCAmelCase__ : Tuple = self.alphas_cumprod[t - 1] if t > 0 else self.one
lowerCAmelCase__ : List[str] = 1 - alpha_prod_t
lowerCAmelCase__ : List[Any] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
lowerCAmelCase__ : Optional[int] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
lowerCAmelCase__ : Any = model_output
else:
raise ValueError(f'Unsupported prediction_type {prediction_type}.' )
# 3. Clip "predicted x_0"
lowerCAmelCase__ : List[Any] = self.bit_scale
if self.config.clip_sample:
lowerCAmelCase__ : Dict = torch.clamp(A_ , -scale , A_ )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCAmelCase__ : Optional[int] = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
lowerCAmelCase__ : int = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCAmelCase__ : Optional[Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
lowerCAmelCase__ : int = 0
if t > 0:
lowerCAmelCase__ : Optional[Any] = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=A_ ).to(model_output.device )
lowerCAmelCase__ : Union[str, Any] = (self._get_variance(A_ , predicted_variance=A_ ) ** 0.5) * noise
lowerCAmelCase__ : Any = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=A_ , pred_original_sample=A_ )
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
def __init__( self : int ,lowercase_ : UNetaDConditionModel ,lowercase_ : Union[DDIMScheduler, DDPMScheduler] ,lowercase_ : Optional[float] = 1.0 ,):
super().__init__()
lowerCAmelCase__ : Any = bit_scale
lowerCAmelCase__ : Optional[int] = (
ddim_bit_scheduler_step if isinstance(lowercase_ ,lowercase_ ) else ddpm_bit_scheduler_step
)
self.register_modules(unet=lowercase_ ,scheduler=lowercase_ )
@torch.no_grad()
def __call__( self : List[str] ,lowercase_ : Optional[int] = 2_5_6 ,lowercase_ : Optional[int] = 2_5_6 ,lowercase_ : Optional[int] = 5_0 ,lowercase_ : Optional[torch.Generator] = None ,lowercase_ : Optional[int] = 1 ,lowercase_ : Optional[str] = "pil" ,lowercase_ : bool = True ,**lowercase_ : Tuple ,):
lowerCAmelCase__ : List[Any] = torch.randn(
(batch_size, self.unet.config.in_channels, height, width) ,generator=lowercase_ ,)
lowerCAmelCase__ : int = decimal_to_bits(lowercase_ ) * self.bit_scale
lowerCAmelCase__ : Optional[int] = latents.to(self.device )
self.scheduler.set_timesteps(lowercase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# predict the noise residual
lowerCAmelCase__ : Dict = self.unet(lowercase_ ,lowercase_ ).sample
# compute the previous noisy sample x_t -> x_t-1
lowerCAmelCase__ : int = self.scheduler.step(lowercase_ ,lowercase_ ,lowercase_ ).prev_sample
lowerCAmelCase__ : Optional[Any] = bits_to_decimal(lowercase_ )
if output_type == "pil":
lowerCAmelCase__ : Union[str, Any] = self.numpy_to_pil(lowercase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase_ )
| 106 |
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
UpperCAmelCase__ = datasets.utils.logging.get_logger(__name__)
class __lowerCAmelCase ( folder_based_builder.FolderBasedBuilderConfig ):
UpperCamelCase = None
UpperCamelCase = None
class __lowerCAmelCase ( folder_based_builder.FolderBasedBuilder ):
UpperCamelCase = datasets.Audio()
UpperCamelCase = '''audio'''
UpperCamelCase = AudioFolderConfig
UpperCamelCase = 42 # definition at the bottom of the script
UpperCamelCase = AudioClassification(audio_column='''audio''' , label_column='''label''' )
UpperCAmelCase__ = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
UpperCAmelCase__ = AUDIO_EXTENSIONS
| 339 | 0 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
__lowerCAmelCase : List[Any] = '0.12' # assumed parallelism: 8
if is_torch_available():
import torch
def __magic_name__ ( A : Dict, A : Union[str, Any], A : Optional[int]=None ):
'''simple docstring'''
if rng is None:
a = random.Random()
a = 1
for dim in shape:
total_dims *= dim
a = []
for _ in range(A ):
values.append(rng.randint(0, vocab_size - 1 ) )
    a = np.array(A, dtype=jnp.int32 ).reshape(A )
return output
def __magic_name__ ( A : Dict, A : Union[str, Any]=None ):
'''simple docstring'''
a = ids_tensor(A, vocab_size=2, rng=A )
# make sure that at least one token is attended to for each batch
a = 1
return attn_mask
@require_flax
class snake_case__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = None
SCREAMING_SNAKE_CASE_ : Any = ()
def __UpperCAmelCase ( self : int ) -> List[str]:
a , a = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
a = 2
a = inputs["input_ids"].shape[-1] // 2
a = inputs["input_ids"][:max_batch_size, :sequence_length]
a = jnp.ones_like(__lowerCamelCase )
a = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
a = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
a = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def __UpperCAmelCase ( self : Optional[Any] ) -> int:
a , a , a , a = self._get_input_ids_and_config()
a = False
a = max_length
a = 0
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model_class.__name__[4:] # Skip the "Flax" at the beginning
a = getattr(__lowerCamelCase , __lowerCamelCase )
a = pt_model_class(__lowerCamelCase ).eval()
a = load_flax_weights_in_pytorch_model(__lowerCamelCase , flax_model.params )
a = flax_model.generate(__lowerCamelCase ).sequences
a = pt_model.generate(torch.tensor(__lowerCamelCase , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
a = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
a , a , a , a = self._get_input_ids_and_config()
a = False
a = max_length
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCAmelCase ( self : Optional[int] ) -> Any:
a , a , a , a = self._get_input_ids_and_config()
a = True
a = max_length
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCAmelCase ( self : int ) -> Dict:
a , a , a , a = self._get_input_ids_and_config()
a = False
a = max_length
a = 2
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCAmelCase ( self : Any ) -> Union[str, Any]:
a , a , a , a = self._get_input_ids_and_config()
a = False
a = max_length
a = 2
a = 2
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def __UpperCAmelCase ( self : Optional[Any] ) -> Dict:
a , a , a , a = self._get_input_ids_and_config()
a = True
a = max_length
a = 0.8
a = 10
a = 0.3
a = 1
a = 8
a = 9
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
a , a , a , a = self._get_input_ids_and_config()
a = max_length
a = 1
a = 8
a = 9
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
a , a , a , a = self._get_input_ids_and_config()
a = max_length
a = 2
a = 1
a = 8
a = 9
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
a , a , a , a = self._get_input_ids_and_config()
# pad attention mask on the left
a = attention_mask.at[(0, 0)].set(0 )
a = False
a = max_length
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCAmelCase ( self : Tuple ) -> Tuple:
a , a , a , a = self._get_input_ids_and_config()
# pad attention mask on the left
a = attention_mask.at[(0, 0)].set(0 )
a = True
a = max_length
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
a , a , a , a = self._get_input_ids_and_config()
# pad attention mask on the left
a = attention_mask.at[(0, 0)].set(0 )
a = 2
a = max_length
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
a = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert" )
a = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
a = "Hello world"
a = tokenizer(__lowerCamelCase , return_tensors="np" ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(__lowerCamelCase , "do_samples" ):
model.generate(__lowerCamelCase , do_samples=__lowerCamelCase )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(__lowerCamelCase , "foo" ):
a = {"foo": "bar"}
model.generate(__lowerCamelCase , **__lowerCamelCase )
| 107 |
import sys
from collections import defaultdict


class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        # Sift the element at `start` down until the min-heap property holds.
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp_pos = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp_pos

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start]))
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    # Update function if value of any node in min-heap decreases
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges
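

# A minimal usage sketch (the graph below is a made-up example, not part of the
# original module):
#
#     example_graph = defaultdict(list)
#     for u, v, w in [(0, 1, 1), (0, 2, 4), (1, 2, 2), (2, 3, 3)]:
#         example_graph[u].append([v, w])
#         example_graph[v].append([u, w])
#     prisms_algorithm(example_graph)  # expected: [(0, 1), (1, 2), (2, 3)]
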
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
UpperCAmelCase__ = int(input("Enter number of edges: ").strip())
UpperCAmelCase__ = defaultdict(list)
for _ in range(edges_number):
UpperCAmelCase__ = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 339 | 0 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    """Collect rename pairs for the patch-embedding weights of stage `idx`."""
    embed = []
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
f"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
f"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
f"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
f"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def attention(idx, cnt):
    """Collect rename pairs for attention block `cnt` of stage `idx`."""
    attention_weights = []
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", f"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", f"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", f"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", f"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def cls_token(idx):
    """Collect the rename pair for the classification token of stage `idx`."""
    token = []
    token.append((f"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token"))
    return token
def final():
    """Collect rename pairs for the final layernorm and the classifier head."""
    head = []
    head.append(("layernorm.weight", "norm.weight"))
    head.append(("layernorm.bias", "norm.bias"))
    head.append(("classifier.weight", "head.weight"))
    head.append(("classifier.bias", "head.bias"))
    return head
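

# Every helper above yields (huggingface_name, original_name) pairs. For example,
# embeddings(0)[0] is
#     ("cvt.encoder.stages.0.embedding.convolution_embeddings.projection.weight",
#      "stage0.patch_embed.proj.weight"),
# and the converter below copies original_weights[original_name] into
# huggingface_weights[huggingface_name].
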
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder):
    """Convert an original CvT checkpoint to the Hugging Face transformers format."""
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]

    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]

    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder)
    image_processor.save_pretrained(pytorch_dump_folder)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=384,
type=int,
help='''Input Image Size''',
)
    parser.add_argument(
        '''--cvt_file_name''',
        default=r'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
        type=str,
        help='''Path to the original CvT checkpoint file.''',
    )
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 108 |
import torch

from transformers import CamembertForMaskedLM, CamembertTokenizer


def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count('<mask>') == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = ' '.join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))])
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(' ')):
        predicted_token = predicted_token_bpe.replace('\u2581', ' ')
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(' {0}'.format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                ))
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                ))
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
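# Each entry of the printed list is a (filled_sentence, probability, predicted_token)
# tuple. For illustration only: the top entry looks roughly like
# ("Le camembert est délicieux :)", 0.49, "délicieux"); the exact token and score
# depend on the checkpoint.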
| 339 | 0 |
"""simple docstring"""
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size: int = 13,
        image_size: int = 64,
        patch_size: int = 2,
        embed_dim: int = 3,
        num_channels: int = 3,
        is_training: bool = True,
        use_labels: bool = True,
        hidden_size: int = 128,
        hidden_sizes=[16, 32, 64, 128],
        num_hidden_layers: int = 7,
        num_attention_heads: int = 4,
        intermediate_size: int = 37,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        type_sequence_label_size: int = 10,
        initializer_range: float = 0.02,
        encoder_stride: int = 2,
        num_attention_outputs: int = 1,
        dim: int = 128,
        depths: List[int] = [2, 2, 2, 2],
        resolution: int = 2,
        mlp_expansion_ratio: int = 2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        self.num_attention_outputs = num_attention_outputs
        self.embed_dim = embed_dim
        self.expected_num_hidden_layers = embed_dim + 1
        self.resolution = resolution
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.dim = dim
        self.mlp_expansion_ratio = mlp_expansion_ratio

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return EfficientFormerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            resolution=self.resolution,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            dim=self.dim,
            mlp_expansion_ratio=self.mlp_expansion_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFEfficientFormerModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFEfficientFormerForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFEfficientFormerForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEfficientFormerModel,
            "image-classification": (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )

    fp16 = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="EfficientFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="EfficientFormer does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            if hasattr(self.model_tester, "encoder_seq_length"):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[-1].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states

                self.assertIsInstance(hidden_states, (list, tuple))
                self.assertEqual(len(hidden_states), expected_num_layers)
                seq_len = getattr(self.model_tester, "seq_length", None)
                decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)

                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:]),
                    [decoder_seq_length, self.model_tester.hidden_size],
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="EfficientFormer does not implement masked image modeling yet")
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEfficientFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        chunk_length = getattr(self.model_tester, "chunk_length", None)

        if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
                )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
                )

    def test_compile_tf_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            # Prepare our model
            model = model_class(config)
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            functional_inputs = {
                key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=key)
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            outputs_dict = model(functional_inputs)

            self.assertTrue(outputs_dict is not None)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class EfficientFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.0555, 0.4825, -0.0852])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_classification_head_with_teacher(self):
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            "snap-research/efficientformer-l1-300"
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.1312, 0.4353, -1.0499])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 109 |
import math
import unittest
def is_prime(number: int) -> bool:
    """Checks whether `number` is a prime (True) or not (False)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
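

# Worked example of the 6k +/- 1 trial division: for number = 97, sqrt(97) is about
# 9.85, so the loop only tests i = 5 (and i + 2 = 7). Neither divides 97, and 97 is
# not divisible by 2 or 3 either, so is_prime(97) returns True.
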
class Test(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0), 'Zero doesn\'t have any positive factors, primes must have exactly two.', )
        self.assertFalse(
            is_prime(1), 'One only has 1 positive factor, primes must have exactly two.', )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))
if __name__ == "__main__":
unittest.main()
| 339 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-large-en-ro": 1024,
    "facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
lowerCamelCase : str = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # The mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({'additional_special_tokens': _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else 'eng_Latn'
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline, to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str,
            pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str,
            pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
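

# A minimal usage sketch (the checkpoint name is the one referenced in the maps above):
#
#     tokenizer = NllbTokenizerFast.from_pretrained(
#         "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#     )
#     inputs = tokenizer("Hello world", return_tensors="pt")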
| 47 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
UpperCAmelCase__ = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
UpperCAmelCase__ = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
UpperCAmelCase__ = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Sequence(datasets.Value('string', id='token'), id='sequence'),
                    'references': datasets.Sequence(
                        datasets.Sequence(datasets.Value('string', id='token'), id='sequence'), id='references'),
                }),
        )

    def _compute(
        self,
        predictions: List[List[List[str]]],
        references: List[List[str]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len)
        }
| 339 | 0 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )

    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''ViT does not use inputs_embeds''')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='''ViT does not use inputs_embeds''')
    def test_graph_mode_with_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained('''google/vit-base-patch16-224''')
        self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
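# Illustrative sketch (added; not part of the original test file): why
# interpolate_pos_encoding changes the sequence length checked above. The
# helper and the concrete sizes below are hypothetical, chosen only to mirror
# the halved-image check in the tester.
def _expected_vit_seq_length(image_size: int, patch_size: int) -> int:
    # one position per patch, plus one for the [CLS] token
    return (image_size // patch_size) ** 2 + 1


assert _expected_vit_seq_length(224, 16) == 197
assert _expected_vit_seq_length(112, 16) == 50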
| 249 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
    MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
    MTaTokenizerFast = TaTokenizerFast
UpperCAmelCase__ = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mt5"] = [
"MT5EncoderModel",
"MT5ForConditionalGeneration",
"MT5ForQuestionAnswering",
"MT5Model",
"MT5PreTrainedModel",
"MT5Stack",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
    sys.modules[__name__] = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast},
module_spec=__spec__,
)
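# Illustrative side note (added): the _LazyModule registered above defers the
# heavy framework imports until first attribute access. A minimal stand-alone
# version of the same idea uses PEP 562 module-level __getattr__; the snippet
# below is a hypothetical sketch, not the actual transformers machinery:
#
#     import importlib
#
#     def __getattr__(name):
#         module = importlib.import_module(".modeling_mta", __name__)
#         return getattr(module, name)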
| 339 | 0 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")

        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
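if __name__ == "__main__":
    # Hypothetical usage sketch (added, not part of the original module):
    # aligning the template with a concrete dataset schema copies the
    # dataset's ClassLabel into label_schema, so downstream code can read the
    # real class names.
    example_features = Features({"audio": Audio(), "labels": ClassLabel(names=["no", "yes"])})
    aligned = AudioClassification().align_with_features(example_features)
    assert aligned.label_schema["labels"].names == ["no", "yes"]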
| 155 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class __lowerCAmelCase ( A ):
UpperCamelCase = '''open-llama'''
def __init__( self : str , A : List[Any]=10_00_00 , A : Tuple=40_96 , A : Tuple=1_10_08 , A : List[str]=32 , A : Tuple=32 , A : Optional[Any]="silu" , A : int=20_48 , A : Optional[Any]=0.0_2 , A : Dict=1E-6 , A : Optional[Any]=True , A : List[Any]=0 , A : Dict=1 , A : int=2 , A : Dict=False , A : Optional[int]=True , A : List[Any]=0.1 , A : str=0.1 , A : Dict=True , A : Optional[Any]=True , A : Dict=None , **A : Union[str, Any] , ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = vocab_size
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = hidden_size
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_act
_UpperCAmelCase = initializer_range
_UpperCAmelCase = rms_norm_eps
_UpperCAmelCase = use_cache
_UpperCAmelCase = kwargs.pop(
'use_memorry_efficient_attention' , A)
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_dropout_prob
_UpperCAmelCase = use_stable_embedding
_UpperCAmelCase = shared_input_output_embedding
_UpperCAmelCase = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=A , bos_token_id=A , eos_token_id=A , tie_word_embeddings=A , **A , )
def _lowerCamelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , A) or len(self.rope_scaling) != 2:
raise ValueError(
'`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '
F"got {self.rope_scaling}")
_UpperCAmelCase = self.rope_scaling.get('type' , A)
_UpperCAmelCase = self.rope_scaling.get('factor' , A)
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
if rope_scaling_factor is None or not isinstance(A , A) or rope_scaling_factor <= 1.0:
raise ValueError(F"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
| 339 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
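# Worked example of the resize rule above (added for clarity): a PIL image
# with w=30, h=400 and shortest_edge=18 maps the width to 18 and scales the
# height to int(18 * 400 / 30) = 240, comfortably under longest_edge=1333.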
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 87 |
def validate_initial_digits(credit_card_number: str) -> bool:
    return credit_card_number.startswith(('34', '35', '37', '4', '5', '6'))


def luhn_validation(credit_card_number: str) -> bool:
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number. Since the doubled value is at most 18,
        # digit %= 10; digit += 1 is equivalent to summing its digits.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True
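# Worked example of the Luhn doubling rule above (added; illustrative only):
# for "59", the 5 is doubled to 10; because 10 > 9 the shortcut
# digit %= 10; digit += 1 yields 1 (the digit sum of 10), so the checksum is
# 1 + 9 = 10 and 10 % 10 == 0, i.e. "59" passes luhn_validation.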
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("4111111111111111")
validate_credit_card_number("32323")
| 339 | 0 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 298 |
from functools import reduce
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def solution(n: str = N) -> int:
    '''simple docstring'''
    return max(
        # mypy cannot properly interpret reduce; the lambda multiplies the
        # digits of each 13-character sliding window
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12))
if __name__ == "__main__":
print(f"""{solution() = }""")
| 339 | 0 |
from ...configuration_utils import PretrainedConfig
lowerCAmelCase = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class TapasConfig(PretrainedConfig):
    model_type = "tapas"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024, type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10], initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, positive_label_weight=10.0, num_aggregation_labels=0, aggregation_loss_weight=1.0, use_answer_as_supervision=None, answer_loss_importance=1.0, use_normalized_answer_loss=False, huber_loss_delta=None, temperature=1.0, aggregation_temperature=1.0, use_gumbel_for_cells=False, use_gumbel_for_aggregation=False, average_approximation_function="ratio", cell_selection_preference=None, answer_loss_cutoff=None, max_num_rows=64, max_num_columns=32, average_logits_per_cell=False, select_one_column=True, allow_empty_column_selection=False, init_cell_selection_weights_to_zero=False, reset_position_index_per_cell=True, disable_per_token_loss=False, aggregation_labels=None, no_aggregation_label_index=None, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
| 295 |
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    # copy the matrix and the vector into one augmented matrix
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def interpolate(y_points: list[int]) -> Callable[[int], int]:
    size: int = len(y_points)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int

    # build the Vandermonde system for the polynomial coefficients
    for x_val, y_val in enumerate(y_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size))

    return interpolated_func
def question_function(variable: int) -> int:
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
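# Side note (added for clarity): the alternating series above telescopes to
# u(n) = (n**11 + 1) / (n + 1), a handy closed form for sanity checks,
# e.g. u(1) = 1 and u(2) = 2049 / 3 = 683.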
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    data_points = [func(x_val) for x_val in range(1, order + 1)]
    polynomials = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int

    for poly in polynomials:
        x_val = 1
        # find the first input where the fitted polynomial diverges (FIT -> BOP)
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)

    return ret
if __name__ == "__main__":
print(f"""{solution() = }""")
| 339 | 0 |
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar('KEY')
VAL = TypeVar('VAL')


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self):
        super().__init__(None, None)

    def __bool__(self):
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75):
        self._initial_block_size = initial_block_size
        self._buckets = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY):
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self):
        yield from (item.key for item in self._buckets if item)

    def __repr__(self):
        val_string = " ,".join(
            f'{item.key}: {item.val}' for item in self._buckets if item)
        return f'HashMap({val_string})'
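if __name__ == "__main__":
    # Hypothetical smoke test (added, not part of the original module):
    # collisions resolve by linear probing (_get_next_ind) and the bucket
    # array grows/shrinks around the 0.75 capacity factor, so the map behaves
    # like a plain dict.
    hm = HashMap()
    for i in range(20):
        hm[str(i)] = i
    assert hm["7"] == 7
    del hm["7"]
    assert len(hm) == 19 and "7" not in list(hm)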
| 63 |
from __future__ import annotations
def A ( _UpperCAmelCase : list[int] ) -> bool:
'''simple docstring'''
return len(set(_UpperCAmelCase ) ) == len(_UpperCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 339 | 0 |
"""simple docstring"""
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]
def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0
            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length, )
    return result


def solution(max_power: int = 9) -> int:
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
if __name__ == "__main__":
print(F"""{solution() = }""")
| 100 |
import os
UpperCAmelCase__ = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
def parse_roman_numerals(numerals: str) -> int:
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value
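# Illustrative trace (added for clarity): parse_roman_numerals("XIV") sees
# X(10) >= I(1) and adds 10; then I(1) < V(5) and subtracts 1; the final
# symbol V adds 5, giving 10 - 1 + 5 = 14.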
def generate_roman_numerals(num: int) -> str:
    numerals = ""

    m_count = num // 1_000
    numerals += m_count * "M"
    num %= 1_000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals
def A ( _UpperCAmelCase : str = "/p089_roman.txt" ) -> int:
'''simple docstring'''
_UpperCAmelCase = 0
with open(os.path.dirname(_UpperCAmelCase ) + roman_numerals_filename ) as filea:
_UpperCAmelCase = filea.readlines()
for line in lines:
_UpperCAmelCase = line.strip()
_UpperCAmelCase = parse_roman_numerals(_UpperCAmelCase )
_UpperCAmelCase = generate_roman_numerals(_UpperCAmelCase )
savings += len(_UpperCAmelCase ) - len(_UpperCAmelCase )
return savings
if __name__ == "__main__":
print(f"""{solution() = }""")
| 339 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCamelCase : str = logging.get_logger(__name__)
__lowerCamelCase : Union[str, Any] = {
'''facebook/xmod-base''': '''https://huggingface.co/facebook/xmod-base/resolve/main/config.json''',
'''facebook/xmod-large-prenorm''': '''https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json''',
'''facebook/xmod-base-13-125k''': '''https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-125k''': '''https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-195k''': '''https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json''',
'''facebook/xmod-base-60-125k''': '''https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json''',
'''facebook/xmod-base-60-265k''': '''https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json''',
'''facebook/xmod-base-75-125k''': '''https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json''',
'''facebook/xmod-base-75-269k''': '''https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json''',
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, pre_norm=False, adapter_reduction_factor=2, adapter_layer_norm=False, adapter_reuse_layer_norm=True, ln_before_adapter=True, languages=("en_XX",), default_language=None, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
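# Minimal usage sketch (added; assumes the restored names above): X-MOD keeps
# one adapter per language, and default_language selects which adapter the
# forward pass uses when no explicit language ids are provided, e.g.
#
#     config = XmodConfig(languages=("en_XX", "de_DE"), default_language="en_XX")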
| 219 |
import requests
from bs4 import BeautifulSoup
def get_citation(base_url: str, params: dict) -> str:
    soup = BeautifulSoup(requests.get(base_url, params=params).content, 'html.parser')
    div = soup.find('div', attrs={'class': 'gs_ri'})
    # the third anchor in the gs_fl footer holds the "Cited by N" text
    anchors = div.find('div', attrs={'class': 'gs_fl'}).find_all('a')
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 339 | 0 |
'''simple docstring'''
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 200 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict


@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('junnyu/roformer_chinese_small', from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base')
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]])
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 339 | 0 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A_ :int = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/longformer-base-4096": 4096,
    "allenai/longformer-large-4096": 4096,
    "allenai/longformer-large-4096-finetuned-triviaqa": 4096,
    "allenai/longformer-base-4096-extra.pos.embd.only": 4096,
    "allenai/longformer-large-4096-extra.pos.embd.only": 4096,
}


# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
@lru_cache()
def bytes_to_unicode():
    """Map every byte (0-255) to a printable unicode character, as used by byte-level BPE."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
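
# Illustrative check (not in the original file): printable bytes map to
# themselves, while non-printable bytes are shifted into a printable range,
# e.g. the space byte 0x20 becomes "Ġ" (chr(256 + 32)).
#
#     byte_map = bytes_to_unicode()
#     assert byte_map[ord("!")] == "!"
#     assert byte_map[ord(" ")] == "Ġ"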


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
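
# Example: get_pairs(("h", "e", "l", "l", "o"))
# -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}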


class LongformerTokenizer(PreTrainedTokenizer):
    """Byte-level BPE tokenizer for Longformer (same scheme as RoBERTa/GPT-2)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair merges to a single token, caching the result."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the lowest-ranked (most frequent) pair still present in the word.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
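
    # Worked example (hypothetical merge table, for illustration only): if
    # bpe_ranks favoured ("l", "l") and then ("ll", "o"), bpe("hello") would run
    #   ("h", "e", "l", "l", "o") -> ("h", "e", "ll", "o") -> ("h", "e", "llo")
    # and return "h e llo". Real merges come from the pretrained merges.txt.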

    def _tokenize(self, text):
        """Split text with the GPT-2 regex, byte-encode each piece, then apply BPE."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id; unknown tokens map to the unk id."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an index (int) back to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Join tokens and decode the byte-level characters back into a string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """Wrap one sequence as <s> A </s>, or a pair as <s> A </s></s> B </s>."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
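
    # Example (hypothetical ids): build_inputs_with_special_tokens([10, 11], [20])
    # returns [cls_id, 10, 11, sep_id, sep_id, 20, sep_id], i.e. "<s> A </s></s> B </s>".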

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Longformer does not use token type ids, so the mask is all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """Optionally prepend a space so the first word is tokenized like a mid-sentence word."""
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
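

# End-to-end usage sketch (not part of the original module), assuming the
# published "allenai/longformer-base-4096" vocab and merges files are reachable:
#
#     from transformers import LongformerTokenizer
#
#     tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
#     enc = tokenizer("Hello world")
#     print(enc["input_ids"])                     # ids wrapped as [cls_id, ..., sep_id]
#     print(tokenizer.decode(enc["input_ids"]))   # "<s>Hello world</s>"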
| 71 |
cache = {}  # memoization table keyed by (days, absent, late)


def _calculate(days: int, absent: int, late: int) -> int:
    """Count the valid attendance strings that can still be formed from this state."""
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings

    return prizestrings


def solution(days: int = 30) -> int:
    """Return the number of "prize strings" over the given number of days (Project Euler 191)."""
    return _calculate(days, absent=0, late=0)


if __name__ == "__main__":
    print(solution())
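
# Sanity check from the Project Euler 191 statement (not part of the original
# file): over a 4-day period there are exactly 43 prize strings, so
# solution(4) == 43 should hold.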
| 339 | 0 |