from typing import Callable, Dict, Optional, Tuple

import torch
from torch import nn
from torch.distributions import (
    AffineTransform,
    Distribution,
    Independent,
    NegativeBinomial,
    Normal,
    StudentT,
    TransformedDistribution,
)


class AffineTransformed(TransformedDistribution):
    """Applies an affine (loc/scale) transformation to a base distribution."""

    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim: int = 0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Returns the mean of the distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Returns the variance of the distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Returns the standard deviation of the distribution."""
        return self.variance.sqrt()


class ParameterProjection(nn.Module):
    """Maps hidden states to the unconstrained arguments of a distribution, one linear head per argument."""

    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    """Wraps an arbitrary function as an nn.Module so it can sit inside a model."""

    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)


class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args) -> Distribution:
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self,
        distr_args,
        loc: Optional[torch.Tensor] = None,
        scale: Optional[torch.Tensor] = None,
    ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        """Shape of each individual event drawn from the output distribution."""
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        """Number of event dimensions, i.e. the length of the `event_shape` tuple."""
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        """A value in the support of the distribution, used when padding data series. 0.0 by default."""
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        """Return the parameter projection layer that maps network outputs to distribution arguments."""
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        """Convert raw network outputs into valid distribution parameters; implemented by subclasses."""
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        """Map inputs to the positive half-line; a smooth, softplus-like transformation."""
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0


class StudentTOutput(DistributionOutput):
    """Student-T distribution output."""

    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    """Normal distribution output."""

    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    """Negative binomial distribution output."""

    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    # Instead of scaling the input of the distribution, the negative binomial
    # shifts the logits when a scale is given.
    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits))
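

# --- Editorial usage sketch, not part of the original module. The tensor
# --- shapes below are made up; the demo only shows how hidden states flow
# --- through a parameter projection into a scaled Student-T distribution.
if __name__ == "__main__":
    batch_size, in_features = 8, 32  # illustrative sizes
    hidden_states = torch.randn(batch_size, in_features)

    output = StudentTOutput(dim=1)
    projection = output.get_parameter_projection(in_features)
    distr_args = projection(hidden_states)  # (df, loc, scale), each of shape (batch_size,)

    distribution = output.distribution(distr_args, loc=torch.zeros(batch_size), scale=torch.ones(batch_size))
    print(distribution.sample().shape)  # torch.Size([8])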
from __future__ import annotations


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """
    Sorts `sequence[start..end]` (both inclusive) in place via the deliberately
    inefficient slowsort algorithm.

    >>> seq = [1, 6, 2, 5, 3, 4, 4, 5]
    >>> slowsort(seq)
    >>> seq
    [1, 2, 3, 4, 4, 5, 5, 6]
    """
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
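
# Editorial note, not in the original file: slowsort is the canonical
# "multiply and surrender" algorithm of Broder and Stolfi (1984). Its running
# time satisfies T(n) = 2*T(n/2) + T(n-1) + 1, which is not bounded by any
# polynomial in n -- the pessimal behaviour is the point of the construction.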
import json
import os
import unittest

from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
from __future__ import annotations

from fractions import Fraction


def is_digit_cancelling(num: int, den: int) -> bool:
    """
    Return True if naively "cancelling" the shared digit of the two-digit
    fraction num/den leaves its value unchanged.

    >>> is_digit_cancelling(49, 98)
    True
    >>> is_digit_cancelling(30, 50)
    False
    """
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    """
    Collect every non-trivial digit-cancelling fraction with a two-digit
    denominator and a numerator of at most `digit_len` digits.

    >>> fraction_list(2)
    ['16/64', '19/95', '26/65', '49/98']
    """
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(max_digits: int = 2) -> int:
    """
    Denominator, in lowest terms, of the product of the digit-cancelling
    fractions (Project Euler problem 33).

    >>> solution()
    100
    """
    result = 1.0
    for fraction in fraction_list(max_digits):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())
from __future__ import annotations

import unittest

from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFRoFormerForCausalLM,
        TFRoFormerForMaskedLM,
        TFRoFormerForMultipleChoice,
        TFRoFormerForQuestionAnswering,
        TFRoFormerForSequenceClassification,
        TFRoFormerForTokenClassification,
        TFRoFormerModel,
    )
    from transformers.models.roformer.modeling_tf_roformer import (
        TFRoFormerSelfAttention,
        TFRoFormerSinusoidalPositionalEmbedding,
    )


class TFRoFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs)["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size]
        )

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFRoFormerModel,
            "fill-mask": TFRoFormerForMaskedLM,
            "question-answering": TFRoFormerForQuestionAnswering,
            "text-classification": TFRoFormerForSequenceClassification,
            "text-generation": TFRoFormerForCausalLM,
            "token-classification": TFRoFormerForTokenClassification,
            "zero-shot": TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )

    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)


@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 50000

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)


@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)

        emb = emb1(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
        )

        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)

    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ]
        )
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emb1([2, 16, 512])
        weights = emb1.weight[:3, :5]

        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)


@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
        # 2,12,16,64
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100

        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )

        desired_query_layer = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ]
        )
        desired_key_layer = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ]
        )

        tf.debugging.assert_near(query_layer[0, 0, :6, :8], desired_query_layer, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], desired_key_layer, atol=self.tolerance)
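

# Editorial aside, not part of the original test file: the `desired_weights`
# tables above follow the standard sinusoidal scheme. For position p and pair
# index i in an embedding of size d, the angle is p / 10000**(2 * i / d), with
# the sines stored in the first half of each row and the cosines in the second
# half. A quick spot check of row 1 of the tensor in `test_basic` (d = 6):
import math

assert abs(math.sin(1.0) - 0.8415) < 1e-4  # column 0: sin(1 / 10000**0)
assert abs(math.cos(1.0) - 0.5403) < 1e-4  # column 3: cos(1 / 10000**0)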
import unittest

from datasets import load_dataset

from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow


@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])

        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
import unittest

import numpy as np
import torch

from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile

from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError

import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    PROCESSOR_MAPPING,
    TOKENIZER_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    AutoProcessor,
    AutoTokenizer,
    BertTokenizer,
    ProcessorMixin,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available


sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402
from test_module.custom_processing import CustomProcessor  # noqa E402
from test_module.custom_tokenization import CustomTokenizer  # noqa E402


SAMPLE_PROCESSOR_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_VOCAB = get_tests_dir("fixtures/vocab.json")
SAMPLE_PROCESSOR_CONFIG_DIR = get_tests_dir("fixtures")


class AutoProcessorTest(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_processor_from_model_shortcut(self):
        processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_repo(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
            processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            processor.save_pretrained(tmpdirname)

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_extractor_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # copy relevant files
            copyfile(SAMPLE_PROCESSOR_CONFIG, os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME))
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_feat_extr_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = Wav2Vec2Processor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in tokenizer config
            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_tokenizer_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = Wav2Vec2Processor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in feature extractor config
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_model_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config(processor_class="Wav2Vec2Processor")
            model_config.save_pretrained(tmpdirname)
            # copy relevant files
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))
            # create empty sample processor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write("{}")

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_from_pretrained_dynamic_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )

        processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor", trust_remote_code=True)
        self.assertTrue(processor.special_attribute_present)
        self.assertEqual(processor.__class__.__name__, "NewProcessor")

        feature_extractor = processor.feature_extractor
        self.assertTrue(feature_extractor.special_attribute_present)
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        tokenizer = processor.tokenizer
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            new_processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True, use_fast=False
            )
            new_tokenizer = new_processor.tokenizer
            self.assertTrue(new_tokenizer.special_attribute_present)
            self.assertEqual(new_tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")

    def test_new_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            AutoProcessor.register(CustomConfig, CustomProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoProcessor.register(Wav2Vec2Config, Wav2Vec2Processor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

            with tempfile.TemporaryDirectory() as tmp_dir:
                vocab_file = os.path.join(tmp_dir, "vocab.txt")
                with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                    vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
                tokenizer = CustomTokenizer(vocab_file)

            processor = CustomProcessor(feature_extractor, tokenizer)

            with tempfile.TemporaryDirectory() as tmp_dir:
                processor.save_pretrained(tmp_dir)
                new_processor = AutoProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_processor, CustomProcessor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_processor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            special_attribute_present = False

        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewProcessor(ProcessorMixin):
            feature_extractor_class = "AutoFeatureExtractor"
            tokenizer_class = "AutoTokenizer"
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoProcessor.register(CustomConfig, NewProcessor)
            # If remote code is not set, the default is to use local classes.
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local ones.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertTrue(processor.special_attribute_present)
            self.assertTrue(processor.feature_extractor.special_attribute_present)
            self.assertTrue(processor.tokenizer.special_attribute_present)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_auto_processor_creates_tokenizer(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(processor.__class__.__name__, "BertTokenizerFast")

    def test_auto_processor_creates_image_processor(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext")
        self.assertEqual(processor.__class__.__name__, "ConvNextImageProcessor")


@is_staging_test
class ProcessorPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor"), push_to_hub=True, use_auth_token=self._token
            )

            new_processor = Wav2Vec2Processor.from_pretrained(f"{USER}/test-processor")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())

    def test_push_to_hub_in_organization(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor-org"),
                push_to_hub=True,
                use_auth_token=self._token,
                organization="valid_org",
            )

            new_processor = Wav2Vec2Processor.from_pretrained("valid_org/test-processor-org")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())

    def test_push_to_hub_dynamic_processor(self):
        CustomFeatureExtractor.register_for_auto_class()
        CustomTokenizer.register_for_auto_class()
        CustomProcessor.register_for_auto_class()

        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        processor = CustomProcessor(feature_extractor, tokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            create_repo(f"{USER}/test-dynamic-processor", token=self._token)
            repo = Repository(tmp_dir, clone_from=f"{USER}/test-dynamic-processor", token=self._token)
            processor.save_pretrained(tmp_dir)

            # This has added the proper auto_map field to the feature extractor config
            self.assertDictEqual(
                processor.feature_extractor.auto_map,
                {
                    "AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # This has added the proper auto_map field to the tokenizer config
            with open(os.path.join(tmp_dir, "tokenizer_config.json")) as f:
                tokenizer_config = json.load(f)
            self.assertDictEqual(
                tokenizer_config["auto_map"],
                {
                    "AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # The code has been copied from fixtures
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_feature_extraction.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_tokenization.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_processing.py")))

            repo.push_to_hub()

        new_processor = AutoProcessor.from_pretrained(f"{USER}/test-dynamic-processor", trust_remote_code=True)
        # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
        self.assertEqual(new_processor.__class__.__name__, "CustomProcessor")
from math import sqrt


def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of `n` (all divisors except `n` itself)."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(n: int = 10000) -> int:
    """
    Sum of all amicable numbers below `n` (Project Euler problem 21): numbers i
    with d(d(i)) == i and d(i) != i, where d is the proper-divisor sum.

    >>> solution(10000)
    31626
    >>> solution(5000)
    8442
    """
    total = sum(
        i
        for i in range(1, n)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class CLIPProcessor(ProcessorMixin):
    r"""
    Constructs a CLIP processor which wraps a CLIP image processor and a CLIP tokenizer into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
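

# Editorial usage sketch, not part of the original module. The checkpoint name
# is a real public CLIP checkpoint; the image source is a placeholder:
#
#     from PIL import Image
#     import requests
#     from transformers import CLIPProcessor
#
#     processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#     image = Image.open(requests.get(url, stream=True).raw)  # `url` is a placeholder
#     inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)
#     # `inputs` now holds input_ids, attention_mask, and pixel_values in one BatchEncoding.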
import json
import os
import unittest

from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
import sys
from collections import defaultdict


class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    # Update function if value of any node in min-heap decreases
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    """
    Return the edges of a minimum spanning tree of the weighted graph given as
    an adjacency list `{vertex: [[neighbor, weight], ...]}`, starting from vertex 0.

    >>> adjacency_list = {0: [[1, 1], [3, 3]],
    ...                   1: [[0, 1], [2, 6], [3, 5], [4, 1]],
    ...                   2: [[1, 6], [4, 5], [5, 2]],
    ...                   3: [[0, 3], [1, 5], [4, 1]],
    ...                   4: [[1, 1], [2, 5], [3, 1], [5, 4]],
    ...                   5: [[2, 2], [4, 4]]}
    >>> prisms_algorithm(adjacency_list)
    [(0, 1), (1, 4), (4, 3), (4, 5), (5, 2)]
    """
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union


_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")


@total_ordering
@dataclass
class Version:
    """Dataset version `MAJOR.MINOR.PATCH`, comparable to other versions and to strings."""

    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    """Return the (major, minor, patch) tuple extracted from the version string."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    """Turn a version tuple back into a version string."""
    return ".".join(str(v) for v in version_tuple)
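

# Editorial usage sketch, not part of the original module:
#
#     v = Version("1.0.0")
#     assert v.tuple == (1, 0, 0)
#     assert v == "1.0.0" and v < "1.2.0"   # strings are coerced via _validate_operand
#     assert Version.from_dict({"version_str": "2.1.0", "unused": 1}).patch == 0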
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
__lowercase :List[Any] = logging.get_logger(__name__)
__lowercase :Optional[int] = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "longformer"
    def __init__( self , attention_window = 5_12 , sep_token_id = 2 , pad_token_id = 1 , bos_token_id = 0 , eos_token_id = 2 , vocab_size = 3_05_22 , hidden_size = 7_68 , num_hidden_layers = 12 , num_attention_heads = 12 , intermediate_size = 30_72 , hidden_act = "gelu" , hidden_dropout_prob = 0.1 , attention_probs_dropout_prob = 0.1 , max_position_embeddings = 5_12 , type_vocab_size = 2 , initializer_range = 0.02 , layer_norm_eps = 1E-12 , onnx_export = False , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class _a ( OnnxConfig ):
"""simple docstring"""
def __init__( self : int , a : "PretrainedConfig" , a : str = "default" , a : "List[PatchingSpec]" = None ) ->str:
super().__init__(a , a , a )
SCREAMING_SNAKE_CASE__ : Any = True
@property
    def inputs( self : int ) ->Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE__ : int = {0: "batch", 1: "choice", 2: "sequence"}
else:
SCREAMING_SNAKE_CASE__ : str = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("global_attention_mask", dynamic_axis),
] )
@property
    def outputs( self : Optional[Any] ) ->Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
return outputs
@property
    def atol_for_validation( self : str ) ->float:
return 1E-4
@property
    def default_onnx_opset( self : Any ) ->int:
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14 )
def A_ ( self : str , a : "PreTrainedTokenizerBase" , a : int = -1 , a : int = -1 , a : bool = False , a : Optional[TensorType] = None , ) ->Mapping[str, Any]:
SCREAMING_SNAKE_CASE__ : Tuple = super().generate_dummy_inputs(
preprocessor=a , batch_size=a , seq_length=a , is_pair=a , framework=a )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"] )
# make every second token global
        inputs["global_attention_mask"][:, ::2] = 1
return inputs
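

# Standalone sketch (my own toy shapes, not from the original module) of the
# "every second token global" pattern used in generate_dummy_inputs above:
if __name__ == "__main__":
    import torch

    demo = torch.zeros((2, 8), dtype=torch.int64)
    demo[:, ::2] = 1  # tokens 0, 2, 4, ... get global attention
    print(demo)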
| 26 | 0 |
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)
class HyperParamSearchBackendBase :
    """simple docstring"""

    name: str
    pip_package: str = None

    @staticmethod
    def is_available( ) ->Any:
        raise NotImplementedError

    def run( self , trainer : Union[str, Any] , n_trials : int , direction : str , **kwargs : str ) ->Dict:
        raise NotImplementedError

    def default_hp_space( self , trial : List[str] ) ->Union[str, Any]:
        raise NotImplementedError

    def ensure_available( self : Optional[Any] ) ->Any:
        if not self.is_available():
            raise RuntimeError(
                f"""You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.""" )

    @classmethod
    def pip_install( cls : Optional[Any] ) ->Union[str, Any]:
        return f"""`pip install {cls.pip_package or cls.name}`"""
class OptunaBackend ( HyperParamSearchBackendBase ):
    """simple docstring"""

    name = "optuna"

    @staticmethod
    def is_available( ) ->int:
        return is_optuna_available()

    def run( self , trainer : Any , n_trials : int , direction : str , **kwargs : Dict ) ->Optional[Any]:
        return run_hp_search_optuna(trainer , n_trials , direction , **kwargs )

    def default_hp_space( self , trial : List[str] ) ->Optional[int]:
        return default_hp_space_optuna(trial )
class RayTuneBackend ( HyperParamSearchBackendBase ):
    """simple docstring"""

    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available( ) ->int:
        return is_ray_available()

    def run( self , trainer : str , n_trials : int , direction : str , **kwargs : Optional[int] ) ->Optional[Any]:
        return run_hp_search_ray(trainer , n_trials , direction , **kwargs )

    def default_hp_space( self , trial : Any ) ->int:
        return default_hp_space_ray(trial )
class SigOptBackend ( HyperParamSearchBackendBase ):
    """simple docstring"""

    name = "sigopt"

    @staticmethod
    def is_available( ) ->str:
        return is_sigopt_available()

    def run( self , trainer : str , n_trials : int , direction : str , **kwargs : Optional[int] ) ->List[Any]:
        return run_hp_search_sigopt(trainer , n_trials , direction , **kwargs )

    def default_hp_space( self , trial : List[Any] ) ->List[Any]:
        return default_hp_space_sigopt(trial )
class WandbBackend ( HyperParamSearchBackendBase ):
    """simple docstring"""

    name = "wandb"

    @staticmethod
    def is_available( ) ->List[Any]:
        return is_wandb_available()

    def run( self , trainer : Any , n_trials : int , direction : str , **kwargs : List[Any] ) ->Union[str, Any]:
        return run_hp_search_wandb(trainer , n_trials , direction , **kwargs )

    def default_hp_space( self , trial : List[Any] ) ->Optional[int]:
        return default_hp_space_wandb(trial )
ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def default_hp_search_backend( ):
    '''simple docstring'''
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends ) > 0:
        name = available_backends[0].name
        if len(available_backends ) > 1:
            logger.info(
                f"""{len(available_backends )} hyperparameter search backends available. Using {name} as the default.""" )
return name
raise RuntimeError(
"No hyperparameter search backend available.\n"
+ "\n".join(
f""" - To install {backend.name} run {backend.pip_install()}"""
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
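

# Usage sketch (my own example; it assumes at least one backend, e.g. optuna, is installed):
if __name__ == "__main__":
    print(default_hp_search_backend())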
| 704 |
def solution( n : int = 4_000_000 ):
    '''simple docstring'''
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1] )
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib ) - 1 ):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(f"{solution() = }")
| 26 | 0 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester :
"""simple docstring"""
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=40 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common( self : Optional[Any] ):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_pegasus_inputs_dict(config , input_ids , decoder_input_ids )
return config, inputs_dict
    def check_decoder_model_past_large_inputs( self , config : int , inputs_dict : str ):
        model = TFPegasusModel(config=config ).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )

        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]

        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )

        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice , output_from_past_slice , rtol=1E-3 )
def prepare_pegasus_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
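

# Standalone sketch (my own example, not part of the test suite): how the default
# attention mask above is derived from pad tokens, assuming pad_token_id == 0.
if is_tf_available() and __name__ == "__main__":
    demo_ids = tf.constant([[5, 7, 0, 0]])
    print(tf.cast(tf.math.not_equal(demo_ids, 0), tf.int8))  # -> [[1, 1, 0, 0]]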
@require_tf
class _a ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
{
"conversational": TFPegasusForConditionalGeneration,
"feature-extraction": TFPegasusModel,
"summarization": TFPegasusForConditionalGeneration,
"text2text-generation": TFPegasusForConditionalGeneration,
"translation": TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp( self : Tuple ):
        self.model_tester = TFPegasusModelTester(self )
        self.config_tester = ConfigTester(self , config_class=PegasusConfig )

    def test_config( self : str ):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs( self : Optional[int] ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_sentencepiece
@require_tokenizers
@require_tf
class _a ( unittest.TestCase ):
"""simple docstring"""
    src_text = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
    expected_text = [
"California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"
" reduce the risk of wildfires.",
"N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
snake_case_ = "google/pegasus-xsum"
@cached_property
    def tokenizer( self : Optional[Any] ):
        return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
    def model( self : Any ):
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model
    def _assert_generated_batch_equal_expected( self : str , **a : Any ):
        generated_words = self.translate_src_text(**a )
        assert self.expected_text == generated_words
    def translate_src_text( self : List[Any] , **a : Tuple ):
        model_inputs = self.tokenizer(self.src_text , **a , padding=True , return_tensors="tf" )
        generated_ids = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=True , )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=True )
        return generated_words
@slow
    def test_batch_generation( self : List[str] ):
        self._assert_generated_batch_equal_expected()
| 705 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester ( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , do_resize = True , size = None , size_divisor = 32 , do_rescale = True , rescale_factor = 1 / 2_55 , do_normalize = True , do_center_crop = True , image_mean = [0.4814_5466, 0.457_8275, 0.4082_1073] , image_std = [0.2686_2954, 0.2613_0258, 0.2757_7711] , do_pad = True , batch_size = 7 , min_resolution = 30 , max_resolution = 4_00 , num_channels = 3 , ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 2_88}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
    def prepare_image_processor_dict( self : List[str] ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
    def get_expected_values( self , image_inputs , batched=False ):
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w , h )
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((13_33 / 8_00) * size )
            if max(newh , neww ) > max_size:
                scale = max_size / max(newh , neww )
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5 ), int(neww + 0.5 )
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]

        return expected_height, expected_width
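
    # Worked example (my own numbers): with shortest_edge=288 and size_divisor=32, a
    # 400x300 (w x h) PIL image gives scale = 288/300 and a resize target of (288, 384);
    # both are multiples of 32 already, so (expected_height, expected_width) = (288, 384).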
@require_torch
@require_vision
class _a ( ImageProcessingSavingTestMixin , unittest.TestCase ):
"""simple docstring"""
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None
    def setUp( self : List[Any] ):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self )
@property
    def image_processor_dict( self : Optional[int] ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self : Tuple ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "image_mean" ) )
        self.assertTrue(hasattr(image_processing , "image_std" ) )
        self.assertTrue(hasattr(image_processing , "do_normalize" ) )
        self.assertTrue(hasattr(image_processing , "do_resize" ) )
        self.assertTrue(hasattr(image_processing , "size" ) )
        self.assertTrue(hasattr(image_processing , "size_divisor" ) )
    def test_batch_feature( self : List[Any] ):
        pass
    def test_call_pil( self : Tuple ):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )

        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_numpy( self : Optional[int] ):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )

        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_pytorch( self : str ):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )

        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
| 26 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowercase :Optional[int] = logging.get_logger(__name__)
__lowercase :int = {
"facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}
class _a ( BackboneConfigMixin , PretrainedConfig ):
"""simple docstring"""
snake_case_ = "convnextv2"
    def __init__( self , num_channels=3 , patch_size=4 , num_stages=4 , hidden_sizes=None , depths=None , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1E-12 , drop_path_rate=0.0 , image_size=2_24 , out_features=None , out_indices=None , **kwargs , ):
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 1_92, 3_84, 7_68] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
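

# Quick sketch (my own example) of the stage-name derivation above, assuming the
# default depths [3, 3, 9, 3]:
if __name__ == "__main__":
    depths = [3, 3, 9, 3]
    print(["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)])
    # -> ['stem', 'stage1', 'stage2', 'stage3', 'stage4']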
| 706 |
def miller_rabin( n : int , allow_probable : bool = False ):
'''simple docstring'''
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
raise ValueError(
"Warning: upper bound of deterministic test is exceeded. "
"Pass allow_probable=True to allow probabilistic test. "
"A return value of True indicates a probable prime." )
# array bounds provided by analysis
    bounds = [
2_047,
1_373_653,
25_326_001,
3_215_031_751,
2_152_302_898_747,
3_474_749_660_383,
341_550_071_728_321,
1,
3_825_123_056_546_413_051,
1,
1,
318_665_857_834_031_151_167_461,
3_317_044_064_679_887_385_961_981,
]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds , 1 ):
if n < _p:
# then we have our last prime to check
            plist = primes[:idx]
break
    d, s = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
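    # e.g. for n = 561: n - 1 = 560 = 35 * 2**4, so d = 35 and s = 4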
for prime in plist:
        pr = False
for r in range(_lowerCamelCase ):
            m = pow(prime , d * 2**r , n )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def test_miller_rabin( ):
'''simple docstring'''
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838_201 )
assert miller_rabin(838_207 )
# 1_373_653
assert not miller_rabin(17_316_001 )
assert miller_rabin(17_316_017 )
# 25_326_001
assert not miller_rabin(3_078_386_641 )
assert miller_rabin(3_078_386_653 )
# 3_215_031_751
assert not miller_rabin(1_713_045_574_801 )
assert miller_rabin(1_713_045_574_819 )
# 2_152_302_898_747
assert not miller_rabin(2_779_799_728_307 )
assert miller_rabin(2_779_799_728_327 )
# 3_474_749_660_383
assert not miller_rabin(113_850_023_909_441 )
assert miller_rabin(113_850_023_909_527 )
# 341_550_071_728_321
assert not miller_rabin(1_275_041_018_848_804_351 )
assert miller_rabin(1_275_041_018_848_804_391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79_666_464_458_507_787_791_867 )
assert miller_rabin(79_666_464_458_507_787_791_951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552_840_677_446_647_897_660_333 )
assert miller_rabin(552_840_677_446_647_897_660_359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 26 | 0 |
import requests
__lowercase :Optional[Any] = "" # <-- Put your OpenWeatherMap appid here!
__lowercase :Any = "https://api.openweathermap.org/data/2.5/"
def UpperCAmelCase ( _lowerCamelCase : Dict = "Chicago" , _lowerCamelCase : str = APPID ):
'''simple docstring'''
return requests.get(URL_BASE + "weather" , params=locals() ).json()
def UpperCAmelCase ( _lowerCamelCase : Optional[Any] = "Kolkata, India" , _lowerCamelCase : Union[str, Any] = APPID ):
'''simple docstring'''
return requests.get(URL_BASE + "forecast" , params=locals() ).json()
def UpperCAmelCase ( _lowerCamelCase : Union[str, Any] = 5_5.6_8 , _lowerCamelCase : Dict = 1_2.5_7 , _lowerCamelCase : Tuple = APPID ):
'''simple docstring'''
return requests.get(URL_BASE + "onecall" , params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
__lowercase :Tuple = input("Enter a location:").strip()
if location:
pprint(current_weather(location))
else:
break
| 707 |
import numpy
class TwoHiddenLayerNeuralNetwork :
"""simple docstring"""
    def __init__( self , input_array : numpy.ndarray , output_array : numpy.ndarray ) ->None:
        self.input_array = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3 , 1 )
# Real output values provided.
        self.output_array = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape )
    def feedforward( self : Union[str, Any] ) ->numpy.ndarray:
        self.layer_between_input_and_first_hidden_layer = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
    def back_propagation( self : int ) ->None:
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
    def train( self , output : numpy.ndarray , iterations : int , give_loss : bool ) ->None:
        for iteration in range(1 , iterations + 1 ):
            self.predicted_output = self.feedforward()
self.back_propagation()
if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward() ) )
print(f"""Iteration {iteration} Loss: {loss}""" )
    def predict( self , input_arr : numpy.ndarray ) ->int:
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def sigmoid( value : numpy.ndarray ):
    '''simple docstring'''
    return 1 / (1 + numpy.exp(-value ))


def sigmoid_derivative( value : numpy.ndarray ):
    '''simple docstring'''
    return (value) * (1 - (value))
def example( ):
    '''simple docstring'''
    test_input = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
        ) , dtype=numpy.float64 , )
# True output values for the given input values.
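    # (Each target below is the XOR, i.e. odd parity, of the three input bits.)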
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.float64 )
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(
        input_array=test_input , output_array=output )
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output , iterations=10 , give_loss=False )
    return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.float64 ) )
if __name__ == "__main__":
example()
| 26 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class _a ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    pipeline_class = StableUnCLIPImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
    def get_dummy_components( self : Optional[Any] ):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

# image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size , projection_dim=embedder_projection_dim , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size )
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=embedder_hidden_size , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=embedder_hidden_size , layers_per_block=1 , upcast_attention=True , use_linear_projection=True , )
torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=False , steps_offset=1 , )
torch.manual_seed(0 )
        vae = AutoencoderKL()

        components = {
# image encoding components
"feature_extractor": feature_extractor,
"image_encoder": image_encoder.eval(),
# image noising components
"image_normalizer": image_normalizer.eval(),
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder.eval(),
"unet": unet.eval(),
"scheduler": scheduler,
"vae": vae.eval(),
}
return components
    def get_dummy_inputs( self , device , seed=0 , pil_image=True ):
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )

        input_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0 , 1 )
            input_image = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
    def test_image_embeds_none( self : str ):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImgaImgPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )

        inputs = self.get_dummy_inputs(device )
        inputs.update({"image_embeds": None} )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
    def test_attention_slicing_forward_pass( self : List[str] ):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference )
    def test_inference_batch_single_identical( self : Tuple ):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
    def test_xformers_attention_forwardGenerator_pass( self : int ):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False )
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self : int ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_unclip_l_img2img( self : List[str] ):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )

        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        output = pipe(init_image , "anime turle" , generator=generator , output_type="np" )

        image = output.images[0]
        assert image.shape == (7_68, 7_68, 3)
        assert_mean_pixel_difference(image , expected_image )
    def test_stable_unclip_h_img2img( self : str ):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )

        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        output = pipe(init_image , "anime turle" , generator=generator , output_type="np" )

        image = output.images[0]
        assert image.shape == (7_68, 7_68, 3)
        assert_mean_pixel_difference(image , expected_image )
    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading( self : Dict ):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.float16 )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            init_image , "anime turtle" , num_inference_steps=2 , output_type="np" , )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 708 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__lowercase :Tuple = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
__lowercase :str = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
__lowercase :List[Any] = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
"""simple docstring"""
    def _info( self : List[Any] ) ->MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
    def _compute( self : str , predictions : List[List[str]] , references : List[List[List[str]]] , min_len : int = 1 , max_len : int = 4 , ) ->Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references , hypotheses=predictions , min_len=min_len , max_len=max_len )
}
| 26 | 0 |
import requests
def send_slack_message( message_body : str , slack_url : str ):
    '''simple docstring'''
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url , json={"text": message_body} , headers=headers )
    if response.status_code != 200:
        error_message = (
            "Request to slack returned an error "
            f"""{response.status_code}, the response is:\n{response.text}"""
        )
        raise ValueError(error_message )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
| 709 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__lowercase :List[Any] = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check( pkg : Optional[Any] , hint : Optional[Any]=None ):
    '''simple docstring'''
    require_version(deps[pkg] , hint )
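

# Usage sketch (my own example): check that the installed tqdm satisfies the pinned range.
if __name__ == "__main__":
    dep_version_check("tqdm")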
| 26 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase :Tuple = logging.get_logger(__name__)
__lowercase :List[Any] = {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
'umberto-commoncrawl-cased-v1': (
'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
),
'umberto-wikipedia-uncased-v1': (
'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
),
}
class _a ( PretrainedConfig ):
"""simple docstring"""
snake_case_ = "camembert"
    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig):
    """ONNX export configuration for CamemBERT."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 710 |
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    """Return the maximum sum of any `k` consecutive elements of `array` (sliding window)."""
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    current_sum = sum(array[:k])
    max_sum = current_sum
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
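# Quick worked example of the sliding window above: for [1, 4, 2, 10, 2, 3, 1, 0, 20]
# and k=4 the window sums are 17, 18, 17, 16, 6, 24, so the maximum is 24.
assert max_sum_in_array([1, 4, 2, 10, 2, 3, 1, 0, 20], 4) == 24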
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array = [randint(-1_000, 1_000) for i in range(100)]
    k = randint(0, 110)
print(f"The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}")
| 26 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
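# The `_LazyModule` pattern above defers the heavy torch import until a symbol is first
# accessed. A minimal sketch of the same idea using PEP 562 module-level __getattr__
# (names below are illustrative, not the transformers implementation):
#
# import importlib
#
# def __getattr__(name):
#     for submodule, names in _import_structure.items():
#         if name in names:
#             module = importlib.import_module(f".{submodule}", __name__)
#             return getattr(module, name)
#     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")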
| 711 |
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """Divide-and-conquer maximum of nums[left:right + 1]; indices may be negative."""
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
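# Usage example: the recursion halves [left, right] until single elements remain, so
# every element is visited once (O(n) work, O(log n) recursion depth).
assert find_max([3, 1, -5, 7, 4], 0, 4) == 7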
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 26 | 0 |
def count_divisors(n: int) -> int:
    """Count the divisors of `n` via its prime factorisation."""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution() -> int:
    """Return the first triangular number with more than 500 divisors (Project Euler 12)."""
    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num
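# Sanity check for the divisor count: 28 = 2**2 * 7, so it has (2 + 1) * (1 + 1) = 6
# divisors (1, 2, 4, 7, 14, 28).
assert count_divisors(28) == 6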
if __name__ == "__main__":
print(solution())
| 712 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset; each prompt is yielded `n_copies` times."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies
    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` that checks whether all generations in the batch are complete."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Returns True once every generated sequence contains an end-of-function string."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Truncate a generation at the first stop sequence listed in EOF_STRINGS."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate `batch_size` completions per task and strip everything past the stop sequences."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)
    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()
    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"
    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()
    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)
    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }
    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")
    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size
    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)
    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception
    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)
    generations = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs
    )
    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)
        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=generations, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")
        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)
# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 26 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 713 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _a ( PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    pipeline_class = KandinskyInpaintPipeline
snake_case_ = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
    required_optional_params = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
    test_xformers_attention = False
@property
def A_ ( self : Dict ) ->List[str]:
return 32
@property
def A_ ( self : Optional[int] ) ->Union[str, Any]:
return 32
@property
def A_ ( self : List[str] ) ->int:
return self.time_input_dim
@property
def A_ ( self : Optional[int] ) ->List[str]:
return self.time_input_dim * 4
@property
def A_ ( self : Optional[int] ) ->int:
return 1_00
@property
def A_ ( self : Union[str, Any] ) ->List[Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
return tokenizer
@property
def A_ ( self : List[str] ) ->Tuple:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
SCREAMING_SNAKE_CASE__ : Tuple = MultilingualCLIP(__A )
SCREAMING_SNAKE_CASE__ : int = text_encoder.eval()
return text_encoder
@property
def A_ ( self : List[str] ) ->Tuple:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Dict = {
"in_channels": 9,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "text_image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "text_image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = UNetaDConditionModel(**__A )
return model
@property
def A_ ( self : Any ) ->Optional[int]:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def A_ ( self : str ) ->Optional[Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : str = VQModel(**self.dummy_movq_kwargs )
return model
def A_ ( self : Any ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.dummy_text_encoder
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.dummy_tokenizer
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.dummy_unet
SCREAMING_SNAKE_CASE__ : str = self.dummy_movq
SCREAMING_SNAKE_CASE__ : Dict = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule="linear" , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=__A , set_alpha_to_one=__A , steps_offset=1 , prediction_type="epsilon" , thresholding=__A , )
SCREAMING_SNAKE_CASE__ : Optional[int] = {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def A_ ( self : Dict , a : Optional[int] , a : Union[str, Any]=0 ) ->Tuple:
SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__A ) ).to(__A )
SCREAMING_SNAKE_CASE__ : List[Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__A )
# create init_image
SCREAMING_SNAKE_CASE__ : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(__A ) ).to(__A )
SCREAMING_SNAKE_CASE__ : str = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE__ : Optional[Any] = Image.fromarray(np.uinta(__A ) ).convert("RGB" ).resize((2_56, 2_56) )
# create mask
SCREAMING_SNAKE_CASE__ : int = np.ones((64, 64) , dtype=np.floataa )
SCREAMING_SNAKE_CASE__ : str = 0
if str(__A ).startswith("mps" ):
SCREAMING_SNAKE_CASE__ : Tuple = torch.manual_seed(__A )
else:
SCREAMING_SNAKE_CASE__ : Dict = torch.Generator(device=__A ).manual_seed(__A )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
"prompt": "horse",
"image": init_image,
"mask_image": mask,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 2,
"guidance_scale": 4.0,
"output_type": "np",
}
return inputs
def A_ ( self : int ) ->Dict:
SCREAMING_SNAKE_CASE__ : Any = "cpu"
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : str = self.pipeline_class(**__A )
SCREAMING_SNAKE_CASE__ : Dict = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
SCREAMING_SNAKE_CASE__ : Dict = pipe(**self.get_dummy_inputs(__A ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = output.images
SCREAMING_SNAKE_CASE__ : Optional[int] = pipe(
**self.get_dummy_inputs(__A ) , return_dict=__A , )[0]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
print(f"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ : List[str] = np.array(
[0.832_6919, 0.7379_0467, 0.2091_8581, 0.930_9612, 0.551_1791, 0.4371_3328, 0.551_3321, 0.4992_2934, 0.5949_7786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def A_ ( self : Dict ) ->str:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self : Any ) ->Optional[int]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A_ ( self : Any ) ->Any:
SCREAMING_SNAKE_CASE__ : Dict = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" )
SCREAMING_SNAKE_CASE__ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
SCREAMING_SNAKE_CASE__ : Any = np.ones((7_68, 7_68) , dtype=np.floataa )
SCREAMING_SNAKE_CASE__ : Any = 0
SCREAMING_SNAKE_CASE__ : Optional[int] = "a hat"
SCREAMING_SNAKE_CASE__ : List[Any] = KandinskyPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa )
pipe_prior.to(__A )
SCREAMING_SNAKE_CASE__ : List[Any] = KandinskyInpaintPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-inpaint" , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ : Tuple = pipeline.to(__A )
pipeline.set_progress_bar_config(disable=__A )
SCREAMING_SNAKE_CASE__ : int = torch.Generator(device="cpu" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = pipe_prior(
__A , generator=__A , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
SCREAMING_SNAKE_CASE__ : Dict = pipeline(
__A , image=__A , mask_image=__A , image_embeds=__A , negative_image_embeds=__A , generator=__A , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type="np" , )
SCREAMING_SNAKE_CASE__ : Tuple = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__A , __A )
| 714 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]
    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))
    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))
    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))
    return train_ds, val_ds, test_ds, label2id
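# Hypothetical call sketch (file names and column layout are assumptions): for a CSV
# whose columns are ["sentence", "label"], label_column_id=1 pops "label" as the target
# and the remaining single text column is tokenized to a fixed length.
# train_ds, val_ds, test_ds, label2id = get_tfds(
#     train_file="train.csv", eval_file="dev.csv", test_file=None,
#     tokenizer=tokenizer, label_column_id=1, max_seq_length=128,
# )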
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=1_28,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )
    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )
    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f" {key} = {value}")
                writer.write(f"{key} = {value}\n")
        results.update(result)
    return results
if __name__ == "__main__":
main()
| 26 | 0 |
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
__lowercase :Any = {
'''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''',
'''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''',
'''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''',
}
class OwlViTTextConfig(PretrainedConfig):
    """Configuration for the OWL-ViT text encoder."""

    model_type = "owlvit_text_model"
def __init__( self : str , a : Dict=4_94_08 , a : Optional[int]=5_12 , a : str=20_48 , a : int=12 , a : List[str]=8 , a : Dict=16 , a : Union[str, Any]="quick_gelu" , a : Optional[Any]=1E-5 , a : List[str]=0.0 , a : str=0.02 , a : List[Any]=1.0 , a : Dict=0 , a : Tuple=4_94_06 , a : List[Any]=4_94_07 , **a : Union[str, Any] , ) ->Optional[int]:
super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE__ : int = vocab_size
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE__ : Dict = intermediate_size
SCREAMING_SNAKE_CASE__ : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE__ : int = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Tuple = hidden_act
SCREAMING_SNAKE_CASE__ : List[str] = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Union[str, Any] = attention_dropout
SCREAMING_SNAKE_CASE__ : List[str] = initializer_range
SCREAMING_SNAKE_CASE__ : Optional[int] = initializer_factor
@classmethod
def A_ ( cls : Tuple , a : Union[str, os.PathLike] , **a : List[Any] ) ->Dict:
cls._set_token_in_kwargs(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : int = cls.get_config_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get("model_type" ) == "owlvit":
SCREAMING_SNAKE_CASE__ : Tuple = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
class OwlViTVisionConfig(PretrainedConfig):
    """Configuration for the OWL-ViT vision encoder."""

    model_type = "owlvit_vision_model"
def __init__( self : str , a : Union[str, Any]=7_68 , a : Union[str, Any]=30_72 , a : List[Any]=12 , a : Any=12 , a : Union[str, Any]=3 , a : str=7_68 , a : Optional[Any]=32 , a : Union[str, Any]="quick_gelu" , a : List[str]=1E-5 , a : str=0.0 , a : List[str]=0.02 , a : Optional[int]=1.0 , **a : Union[str, Any] , ) ->Any:
super().__init__(**__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_size
SCREAMING_SNAKE_CASE__ : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Dict = num_attention_heads
SCREAMING_SNAKE_CASE__ : Optional[int] = num_channels
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_size
SCREAMING_SNAKE_CASE__ : Tuple = patch_size
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE__ : Optional[int] = layer_norm_eps
SCREAMING_SNAKE_CASE__ : List[str] = attention_dropout
SCREAMING_SNAKE_CASE__ : Optional[int] = initializer_range
SCREAMING_SNAKE_CASE__ : Optional[Any] = initializer_factor
@classmethod
def A_ ( cls : Tuple , a : Union[str, os.PathLike] , **a : str ) ->Dict:
cls._set_token_in_kwargs(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = cls.get_config_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get("model_type" ) == "owlvit":
SCREAMING_SNAKE_CASE__ : int = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
class OwlViTConfig(PretrainedConfig):
    """Composite configuration holding the OWL-ViT text and vision sub-configs."""

    model_type = "owlvit"
    is_composition = True
def __init__( self : Optional[Any] , a : int=None , a : Optional[Any]=None , a : Dict=5_12 , a : str=2.6592 , a : Dict=True , **a : Optional[Any] , ) ->Dict:
super().__init__(**__SCREAMING_SNAKE_CASE )
if text_config is None:
SCREAMING_SNAKE_CASE__ : int = {}
logger.info("text_config is None. Initializing the OwlViTTextConfig with default values." )
if vision_config is None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {}
logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values." )
SCREAMING_SNAKE_CASE__ : List[Any] = OwlViTTextConfig(**__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE__ : str = OwlViTVisionConfig(**__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE__ : List[str] = projection_dim
SCREAMING_SNAKE_CASE__ : Dict = logit_scale_init_value
SCREAMING_SNAKE_CASE__ : Union[str, Any] = return_dict
SCREAMING_SNAKE_CASE__ : int = 1.0
@classmethod
def A_ ( cls : str , a : Union[str, os.PathLike] , **a : Any ) ->Union[str, Any]:
cls._set_token_in_kwargs(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : str = cls.get_config_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
@classmethod
def A_ ( cls : Union[str, Any] , a : Dict , a : Dict , **a : str ) ->str:
SCREAMING_SNAKE_CASE__ : Optional[int] = {}
SCREAMING_SNAKE_CASE__ : str = text_config
SCREAMING_SNAKE_CASE__ : Optional[int] = vision_config
return cls.from_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def A_ ( self : Dict ) ->str:
SCREAMING_SNAKE_CASE__ : int = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.text_config.to_dict()
SCREAMING_SNAKE_CASE__ : str = self.vision_config.to_dict()
SCREAMING_SNAKE_CASE__ : str = self.__class__.model_type
return output
class OwlViTOnnxConfig(OnnxConfig):
    """ONNX export configuration for OWL-ViT."""
@property
def A_ ( self : Optional[Any] ) ->Tuple:
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("attention_mask", {0: "batch", 1: "sequence"}),
] )
@property
def A_ ( self : List[Any] ) ->Union[str, Any]:
return OrderedDict(
[
("logits_per_image", {0: "batch"}),
("logits_per_text", {0: "batch"}),
("text_embeds", {0: "batch"}),
("image_embeds", {0: "batch"}),
] )
@property
def A_ ( self : List[Any] ) ->Union[str, Any]:
return 1E-4
def A_ ( self : int , a : "ProcessorMixin" , a : int = -1 , a : int = -1 , a : Optional["TensorType"] = None , ) ->List[str]:
SCREAMING_SNAKE_CASE__ : Tuple = super().generate_dummy_inputs(
processor.tokenizer , batch_size=__SCREAMING_SNAKE_CASE , seq_length=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE__ : Dict = super().generate_dummy_inputs(
processor.image_processor , batch_size=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE )
return {**text_input_dict, **image_input_dict}
@property
def A_ ( self : List[Any] ) ->Union[str, Any]:
return 14
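# Usage sketch: the composite config accepts plain dicts for its sub-configs, with unset
# fields falling back to the defaults above (the values here are illustrative only):
# config = OwlViTConfig(text_config={"vocab_size": 49_408}, vision_config={"image_size": 768})
# print(config.projection_dim)  # 512 by default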
| 715 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowercase :int = logging.get_logger(__name__)
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = ["pixel_values"]
def __init__( self : int , a : bool = True , a : Optional[Dict[str, int]] = None , a : PILImageResampling = PILImageResampling.BILINEAR , a : bool = True , a : Dict[str, int] = None , a : bool = True , a : Union[int, float] = 1 / 2_55 , a : bool = True , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , **a : List[str] , ) ->None:
super().__init__(**a )
SCREAMING_SNAKE_CASE__ : List[str] = size if size is not None else {"shortest_edge": 2_56}
SCREAMING_SNAKE_CASE__ : Any = get_size_dict(a , default_to_square=a )
SCREAMING_SNAKE_CASE__ : List[Any] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(a )
SCREAMING_SNAKE_CASE__ : List[str] = do_resize
SCREAMING_SNAKE_CASE__ : List[str] = size
SCREAMING_SNAKE_CASE__ : List[Any] = resample
SCREAMING_SNAKE_CASE__ : int = do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[Any] = crop_size
SCREAMING_SNAKE_CASE__ : Any = do_rescale
SCREAMING_SNAKE_CASE__ : Any = rescale_factor
SCREAMING_SNAKE_CASE__ : int = do_normalize
SCREAMING_SNAKE_CASE__ : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE__ : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A_ ( self : Tuple , a : np.ndarray , a : Dict[str, int] , a : PILImageResampling = PILImageResampling.BICUBIC , a : Optional[Union[str, ChannelDimension]] = None , **a : Optional[int] , ) ->np.ndarray:
SCREAMING_SNAKE_CASE__ : List[Any] = get_size_dict(a , default_to_square=a )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = get_resize_output_image_size(a , size=size["shortest_edge"] , default_to_square=a )
return resize(a , size=a , resample=a , data_format=a , **a )
def A_ ( self : List[Any] , a : np.ndarray , a : Dict[str, int] , a : Optional[Union[str, ChannelDimension]] = None , **a : List[Any] , ) ->np.ndarray:
SCREAMING_SNAKE_CASE__ : Tuple = get_size_dict(a )
return center_crop(a , size=(size["height"], size["width"]) , data_format=a , **a )
def A_ ( self : Optional[int] , a : np.ndarray , a : float , a : Optional[Union[str, ChannelDimension]] = None , **a : Dict ) ->np.ndarray:
return rescale(a , scale=a , data_format=a , **a )
def A_ ( self : Union[str, Any] , a : np.ndarray , a : Union[float, List[float]] , a : Union[float, List[float]] , a : Optional[Union[str, ChannelDimension]] = None , **a : Union[str, Any] , ) ->np.ndarray:
return normalize(a , mean=a , std=a , data_format=a , **a )
def A_ ( self : Tuple , a : ImageInput , a : Optional[bool] = None , a : Dict[str, int] = None , a : PILImageResampling = None , a : bool = None , a : Dict[str, int] = None , a : Optional[bool] = None , a : Optional[float] = None , a : Optional[bool] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[str, TensorType]] = None , a : Union[str, ChannelDimension] = ChannelDimension.FIRST , **a : Any , ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : Union[str, Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(a , default_to_square=a )
SCREAMING_SNAKE_CASE__ : str = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[int] = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(a )
SCREAMING_SNAKE_CASE__ : List[str] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ : Dict = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ : Optional[int] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE__ : Tuple = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE__ : Union[str, Any] = make_list_of_images(a )
if not valid_images(a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : List[str] = [to_numpy_array(a ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE__ : Tuple = [self.resize(image=a , size=a , resample=a ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE__ : List[Any] = [self.center_crop(image=a , size=a ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ : List[str] = [self.rescale(image=a , scale=a ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ : Dict = [self.normalize(image=a , mean=a , std=a ) for image in images]
SCREAMING_SNAKE_CASE__ : Dict = [to_channel_dimension_format(a , a ) for image in images]
SCREAMING_SNAKE_CASE__ : Optional[int] = {"pixel_values": images}
return BatchFeature(data=a , tensor_type=a )
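# Usage sketch, assuming the standard BaseImageProcessor convention in which an instance
# is callable and dispatches to `preprocess` (the class name above is obfuscated to `_a`;
# it is a ViT-style resize -> center-crop -> rescale -> normalize pipeline):
# import numpy as np
# processor = _a()
# image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
# batch = processor(image, return_tensors="np")
# print(batch["pixel_values"].shape)  # e.g. (1, 3, 224, 224) after resize + center crop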
| 26 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the area under `fnc` between x_start and x_end using `steps` trapezoids."""
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area
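# Quick check of the approximation: the integral of x**2 over [0, 1] is exactly 1/3,
# and with 1_000 trapezoids the estimate agrees to well under 1e-4.
assert abs(trapezoidal_area(lambda x: x**2, 0, 1, 1_000) - 1 / 3) < 1e-4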
if __name__ == "__main__":
    def f(x):
        return x**3 + x**2
print("f(x) = x^3 + x^2")
print("The area between the curve, x = -5, x = 5 and the x axis is:")
__lowercase :str = 10
while i <= 100_000:
print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
i *= 10
| 716 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _a ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self : Dict ) ->List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def A_ ( self : Dict ) ->Tuple:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-canny" , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__ : List[Any] = controlnet_params
SCREAMING_SNAKE_CASE__ : Dict = "bird"
SCREAMING_SNAKE_CASE__ : List[Any] = jax.device_count()
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe.prepare_text_inputs([prompts] * num_samples )
SCREAMING_SNAKE_CASE__ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe.prepare_image_inputs([canny_image] * num_samples )
SCREAMING_SNAKE_CASE__ : List[Any] = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE__ : int = jax.random.split(a , jax.device_count() )
SCREAMING_SNAKE_CASE__ : List[Any] = replicate(a )
SCREAMING_SNAKE_CASE__ : List[str] = shard(a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = shard(a )
SCREAMING_SNAKE_CASE__ : Dict = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE__ : List[Any] = images[0, 2_53:2_56, 2_53:2_56, -1]
SCREAMING_SNAKE_CASE__ : Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = jnp.array(
[0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def A_ ( self : List[Any] ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : int = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-openpose" , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__ : Optional[int] = controlnet_params
SCREAMING_SNAKE_CASE__ : Any = "Chef in the kitchen"
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jax.device_count()
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe.prepare_text_inputs([prompts] * num_samples )
SCREAMING_SNAKE_CASE__ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" )
SCREAMING_SNAKE_CASE__ : str = pipe.prepare_image_inputs([pose_image] * num_samples )
SCREAMING_SNAKE_CASE__ : Any = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE__ : List[str] = jax.random.split(a , jax.device_count() )
SCREAMING_SNAKE_CASE__ : Optional[Any] = replicate(a )
SCREAMING_SNAKE_CASE__ : Tuple = shard(a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = shard(a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE__ : str = images[0, 2_53:2_56, 2_53:2_56, -1]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jnp.array(
[[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 26 | 0 |
import argparse
import struct
import unittest
class _a :
"""simple docstring"""
def __init__( self : Union[str, Any] , a : bytes ) ->None:
SCREAMING_SNAKE_CASE__ : int = data
# Initialize hash values
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
0X6A09_E667,
0XBB67_AE85,
0X3C6E_F372,
0XA54F_F53A,
0X510E_527F,
0X9B05_688C,
0X1F83_D9AB,
0X5BE0_CD19,
]
# Initialize round constants
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
0X428A_2F98,
0X7137_4491,
0XB5C0_FBCF,
0XE9B5_DBA5,
0X3956_C25B,
0X59F1_11F1,
0X923F_82A4,
0XAB1C_5ED5,
0XD807_AA98,
0X1283_5B01,
0X2431_85BE,
0X550C_7DC3,
0X72BE_5D74,
0X80DE_B1FE,
0X9BDC_06A7,
0XC19B_F174,
0XE49B_69C1,
0XEFBE_4786,
0X0FC1_9DC6,
0X240C_A1CC,
0X2DE9_2C6F,
0X4A74_84AA,
0X5CB0_A9DC,
0X76F9_88DA,
0X983E_5152,
0XA831_C66D,
0XB003_27C8,
0XBF59_7FC7,
0XC6E0_0BF3,
0XD5A7_9147,
0X06CA_6351,
0X1429_2967,
0X27B7_0A85,
0X2E1B_2138,
0X4D2C_6DFC,
0X5338_0D13,
0X650A_7354,
0X766A_0ABB,
0X81C2_C92E,
0X9272_2C85,
0XA2BF_E8A1,
0XA81A_664B,
0XC24B_8B70,
0XC76C_51A3,
0XD192_E819,
0XD699_0624,
0XF40E_3585,
0X106A_A070,
0X19A4_C116,
0X1E37_6C08,
0X2748_774C,
0X34B0_BCB5,
0X391C_0CB3,
0X4ED8_AA4A,
0X5B9C_CA4F,
0X682E_6FF3,
0X748F_82EE,
0X78A5_636F,
0X84C8_7814,
0X8CC7_0208,
0X90BE_FFFA,
0XA450_6CEB,
0XBEF9_A3F7,
0XC671_78F2,
]
SCREAMING_SNAKE_CASE__ : int = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def A_ ( a : bytes ) ->bytes:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = b"\x80" + (b"\x00" * (63 - (len(__A ) + 8) % 64))
SCREAMING_SNAKE_CASE__ : Any = struct.pack(">Q" , (len(__A ) * 8) )
return data + padding + big_endian_integer
def A_ ( self : Dict ) ->None:
# Convert into blocks of 64 bytes
SCREAMING_SNAKE_CASE__ : str = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
SCREAMING_SNAKE_CASE__ : int = list(struct.unpack(">16L" , __A ) )
# add 48 0-ed integers
words += [0] * 48
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.hashes
for index in range(0 , 64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
SCREAMING_SNAKE_CASE__ : int = (
self.ror(words[index - 15] , 7 )
^ self.ror(words[index - 15] , 18 )
^ (words[index - 15] >> 3)
)
SCREAMING_SNAKE_CASE__ : int = (
self.ror(words[index - 2] , 17 )
^ self.ror(words[index - 2] , 19 )
^ (words[index - 2] >> 10)
)
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
words[index - 16] + sa + words[index - 7] + sa
) % 0X1_0000_0000
# Compression
SCREAMING_SNAKE_CASE__ : Optional[int] = self.ror(__A , 6 ) ^ self.ror(__A , 11 ) ^ self.ror(__A , 25 )
SCREAMING_SNAKE_CASE__ : List[str] = (e & f) ^ ((~e & 0XFFFF_FFFF) & g)
SCREAMING_SNAKE_CASE__ : List[str] = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0X1_0000_0000
SCREAMING_SNAKE_CASE__ : List[str] = self.ror(__A , 2 ) ^ self.ror(__A , 13 ) ^ self.ror(__A , 22 )
SCREAMING_SNAKE_CASE__ : List[str] = (a & b) ^ (a & c) ^ (b & c)
SCREAMING_SNAKE_CASE__ : List[Any] = (sa + maj) % 0X1_0000_0000
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : str = (
g,
f,
e,
((d + tempa) % 0X1_0000_0000),
c,
b,
a,
((tempa + tempa) % 0X1_0000_0000),
)
SCREAMING_SNAKE_CASE__ : Tuple = [a, b, c, d, e, f, g, h]
# Modify final values
SCREAMING_SNAKE_CASE__ : int = [
((element + mutated_hash_values[index]) % 0X1_0000_0000)
for index, element in enumerate(self.hashes )
]
SCREAMING_SNAKE_CASE__ : Optional[int] = "".join([hex(__A )[2:].zfill(8 ) for value in self.hashes] )
def A_ ( self : Optional[int] , a : int , a : int ) ->int:
return 0XFFFF_FFFF & (value << (32 - rotations)) | (value >> rotations)
class _a ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self : Dict ) ->None:
import hashlib
SCREAMING_SNAKE_CASE__ : Optional[int] = bytes("Test String" , "utf-8" )
self.assertEqual(SHAaaa(__A ).hash , hashlib.shaaaa(__A ).hexdigest() )
def UpperCAmelCase ( ):
'''simple docstring'''
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE__ : Dict = argparse.ArgumentParser()
parser.add_argument(
"-s" , "--string" , dest="input_string" , default="Hello World!! Welcome to Cryptography" , help="Hash the string" , )
parser.add_argument(
"-f" , "--file" , dest="input_file" , help="Hash contents of a file" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = parser.parse_args()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , "rb" ) as f:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = f.read()
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = bytes(_lowercase , "utf-8" )
print(SHAaaa(_lowercase ).hash )
if __name__ == "__main__":
main()
| 717 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_configure(config ):
'''simple docstring'''
config.addinivalue_line(
"markers" , "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested" )
config.addinivalue_line(
"markers" , "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested" )
config.addinivalue_line("markers" , "is_pipeline_test: mark test to run only when pipelines are tested" )
config.addinivalue_line("markers" , "is_staging_test: mark test to run only in the staging environment" )
config.addinivalue_line("markers" , "accelerate_tests: mark test that require accelerate" )
config.addinivalue_line("markers" , "tool_tests: mark the tool tests that are run on their specific schedule" )
def pytest_addoption(parser ):
    '''simple docstring'''
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary(terminalreporter ):
    '''simple docstring'''
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption("--make-reports" )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
def pytest_sessionfinish(session , exitstatus ):
    '''simple docstring'''
    # pytest exit status 5 means "no tests were collected"; report success instead
    # so that fully-deselected runs do not fail CI.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")
OutputChecker = doctest.OutputChecker
class CustomOutputChecker ( OutputChecker ):
    """simple docstring"""
    def check_output( self : Dict , want : List[str] , got : Dict , optionflags : int ) ->Optional[Any]:
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self , want , got , optionflags )
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
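# The three assignments above monkey-patch doctest and pytest so that collection
# picks up the custom IGNORE_RESULT flag and Hugging Face's doctest module/parser.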
| 26 | 0 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
__lowercase :int = logging.get_logger(__name__)
@dataclass
class _a ( __snake_case ):
"""simple docstring"""
snake_case_ = [
"no_inference",
"no_cuda",
"no_tpu",
"no_speed",
"no_memory",
"no_env_print",
"no_multi_process",
]
    def __init__( self : Optional[Any] , **kwargs : Tuple ) ->int:
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg )
                logger.warning(
                    f"""{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"""
                    f""" {positive_arg}={kwargs[positive_arg]}""" )
        self.tpu_name = kwargs.pop("tpu_name" , self.tpu_name )
        self.device_idx = kwargs.pop("device_idx" , self.device_idx )
        self.eager_mode = kwargs.pop("eager_mode" , self.eager_mode )
        self.use_xla = kwargs.pop("use_xla" , self.use_xla )
        super().__init__(**kwargs )
snake_case_ = field(
default=__snake_case , metadata={"help": "Name of TPU"} , )
snake_case_ = field(
default=0 , metadata={"help": "CPU / GPU device index. Defaults to 0."} , )
snake_case_ = field(default=__snake_case , metadata={"help": "Benchmark models in eager model."} )
snake_case_ = field(
default=__snake_case , metadata={
"help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
} , )
@cached_property
    def _setup_tpu( self : Any ) ->int:
        requires_backends(self , ["tf"] )
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu
@cached_property
    def _setup_strategy( self : Optional[Any] ) ->Optional[Any]:
        requires_backends(self , ["tf"] )
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu )
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
            strategy = tf.distribute.TPUStrategy(self._setup_tpu )
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx] , "GPU" )
                strategy = tf.distribute.OneDeviceStrategy(device=f"""/gpu:{self.device_idx}""" )
            else:
                tf.config.set_visible_devices([] , "GPU" ) # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"""/cpu:{self.device_idx}""" )
        return strategy
@property
    def is_tpu( self : Optional[Any] ) ->Optional[int]:
requires_backends(self , ["tf"] )
return self._setup_tpu is not None
@property
    def strategy( self : List[str] ) ->Dict:
requires_backends(self , ["tf"] )
return self._setup_strategy
@property
    def gpu_list( self : Optional[Any] ) ->Optional[Any]:
requires_backends(self , ["tf"] )
return tf.config.list_physical_devices("GPU" )
@property
    def n_gpu( self : Dict ) ->Tuple:
requires_backends(self , ["tf"] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
    def is_gpu( self : Tuple ) ->Tuple:
return self.n_gpu > 0
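# Usage sketch (assuming the upstream name TensorFlowBenchmarkArguments for this
# dataclass, which is renamed in this dump):
#   args = TensorFlowBenchmarkArguments(models=["bert-base-cased"], batch_sizes=[8])
#   args.strategy  # OneDeviceStrategy on CPU/GPU, TPUStrategy once a TPU resolves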
| 718 |
def solution(n: int = 1_000 ):
    '''simple docstring'''
    product = -1
    candidate = 0
    for a in range(1 , n // 3 ):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
return product
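# For n = 1_000 the only such triplet is (a, b, c) = (200, 375, 425), so
# solution() returns 200 * 375 * 425 == 31_875_000.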
if __name__ == "__main__":
print(f"{solution() = }")
| 26 | 0 |
def count_inversions_bf(arr ):
    '''simple docstring'''
    num_inversions = 0
    n = len(arr )
    for i in range(n - 1 ):
        for j in range(i + 1 , n ):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions
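# e.g. count_inversions_bf([3, 1, 2]) == 2, from the pairs (3, 1) and (3, 2).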
def count_inversions_recursive(arr ):
    '''simple docstring'''
    if len(arr ) <= 1:
        return arr, 0
    mid = len(arr ) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p )
    b, inversions_q = count_inversions_recursive(q )
    c, cross_inversions = _count_cross_inversions(a , b )
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions
def _count_cross_inversions(p , q ):
    '''simple docstring'''
    r = []
    i = j = num_inversion = 0
    while i < len(p ) and j < len(q ):
        if p[i] > q[j]:
            # if p[i] > q[j], then p[k] > q[j] for all i <= k < len(p).
            # These are all inversions. The claim emerges from the
            # property that p is sorted.
            num_inversion += len(p ) - i
            r.append(q[j] )
            j += 1
        else:
            r.append(p[i] )
            i += 1
    if i < len(p ):
        r.extend(p[i:] )
    else:
        r.extend(q[j:] )
    return r, num_inversion
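# Because the merge above is the usual merge-sort merge, the recursive count runs
# in O(n log n), versus O(n**2) for the brute-force version.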
def main():
    '''simple docstring'''
    arr_a = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_a )
    _, num_inversions_recursive = count_inversions_recursive(arr_a )
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = " , num_inversions_bf )
    # testing an array with zero inversion (a sorted arr_a)
    arr_a.sort()
    num_inversions_bf = count_inversions_bf(arr_a )
    _, num_inversions_recursive = count_inversions_recursive(arr_a )
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = " , num_inversions_bf )
    # an empty list should also have zero inversions
    arr_a = []
    num_inversions_bf = count_inversions_bf(arr_a )
    _, num_inversions_recursive = count_inversions_recursive(arr_a )
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = " , num_inversions_bf )
if __name__ == "__main__":
main()
| 719 |
from __future__ import annotations
def slowsort(sequence: list , start: int | None = None , end: int | None = None ):
    '''simple docstring'''
    if start is None:
        start = 0
    if end is None:
        end = len(sequence ) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence , start , mid )
    slowsort(sequence , mid + 1 , end )
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence , start , end - 1 )
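# Slowsort is a deliberately pessimal "multiply and surrender" algorithm: it sorts
# correctly but in super-polynomial time, so it is of pedagogical interest only.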
if __name__ == "__main__":
from doctest import testmod
testmod()
| 26 | 0 |
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
__lowercase :str = logging.get_logger(__name__)
def list_field(default=None , metadata=None ):
    '''simple docstring'''
    return field(default_factory=lambda: default , metadata=metadata )
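# e.g. list_field(default=[8]) gives every dataclass instance its own fresh list;
# dataclasses reject a plain mutable default like field(default=[8]).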
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
snake_case_ = list_field(
default=[] , metadata={
"help": (
"Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
" of all available models"
)
} , )
snake_case_ = list_field(
default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} )
snake_case_ = list_field(
default=[8, 32, 1_28, 5_12] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , )
snake_case_ = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , )
snake_case_ = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , )
snake_case_ = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} )
snake_case_ = field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Use FP16 to accelerate inference."} )
snake_case_ = field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Benchmark training of model"} )
snake_case_ = field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Verbose memory tracing"} )
snake_case_ = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , )
snake_case_ = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
} , )
snake_case_ = field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Trace memory line by line"} )
snake_case_ = field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Save result to a CSV file"} )
snake_case_ = field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Save all print statements in a log file"} )
snake_case_ = field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to print environment information"} )
snake_case_ = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"help": (
"Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
" multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
" for debugging / testing and on TPU."
)
} , )
snake_case_ = field(
default=f'inference_time_{round(time() )}.csv' , metadata={"help": "CSV filename used if saving time results to csv."} , )
snake_case_ = field(
default=f'inference_memory_{round(time() )}.csv' , metadata={"help": "CSV filename used if saving memory results to csv."} , )
snake_case_ = field(
default=f'train_time_{round(time() )}.csv' , metadata={"help": "CSV filename used if saving time results to csv for training."} , )
snake_case_ = field(
default=f'train_memory_{round(time() )}.csv' , metadata={"help": "CSV filename used if saving memory results to csv for training."} , )
snake_case_ = field(
default=f'env_info_{round(time() )}.csv' , metadata={"help": "CSV filename used if saving environment information."} , )
snake_case_ = field(
default=f'log_{round(time() )}.csv' , metadata={"help": "Log filename used if print statements are saved in log."} , )
snake_case_ = field(default=3 , metadata={"help": "Times an experiment will be run."} )
snake_case_ = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"help": (
"Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
" model weights."
)
} , )
    def __post_init__( self : List[str] ) ->Tuple:
        warnings.warn(
            f"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"""
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models." , FutureWarning , )
def A_ ( self : Tuple ) ->int:
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def A_ ( self : str ) ->Union[str, Any]:
if len(self.models ) <= 0:
raise ValueError(
"Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
" bert-base-cased` or `args.models = [\'bert-base-cased\']." )
return self.models
@property
def A_ ( self : Dict ) ->str:
if not self.multi_process:
return False
elif self.is_tpu:
logger.info("Multiprocessing is currently not possible on TPU." )
return False
else:
return True
| 720 |
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int , den: int ):
'''simple docstring'''
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
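# e.g. is_digit_cancelling(49, 98) is True: naively "cancelling" the 9s gives 4/8,
# which happens to equal 49/98.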
def fraction_list(digit_len: int ):
    '''simple docstring'''
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len )
    for num in range(den , last_digit ):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num , den ):
                    solutions.append(f"""{num}/{den}""" )
            den += 1
        num += 1
        den = 10
    return solutions
def solution(n: int = 2 ):
    '''simple docstring'''
    result = 1.0
    for fraction in fraction_list(n ):
        frac = Fraction(fraction )
        result *= frac.denominator / frac.numerator
    return int(result )
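# The four non-trivial fractions are 16/64, 19/95, 26/65 and 49/98; their product
# reduces to 1/100, so solution() returns 100.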
if __name__ == "__main__":
print(solution())
| 26 | 0 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = ["image_processor", "tokenizer"]
snake_case_ = "AutoImageProcessor"
snake_case_ = "AutoTokenizer"
    def __init__( self : str , image_processor : Union[str, Any] , tokenizer : Union[str, Any] ) ->List[str]:
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self : List[Any] , text : List[Any]=None , images : Optional[int]=None , return_tensors : List[Any]=None , **kwargs : Dict ) ->Dict:
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none." )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self : Dict , *args : Optional[int] , **kwargs : Dict ) ->Union[str, Any]:
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self : Dict , *args : Optional[Any] , **kwargs : Optional[int] ) ->Any:
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self : Dict ) ->Optional[Any]:
        return ["input_ids", "attention_mask", "pixel_values"]
| 721 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
@require_torch
def A_ ( self : Dict ) ->str:
        audio_classifier = pipeline(
            task="zero-shot-audio-classification" , model="hf-internal-testing/tiny-clap-htsat-unfused" )
        dataset = load_dataset("ashraq/esc50" )
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio , candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"] )
        self.assertEqual(
            nested_simplify(output ) , [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vacuum cleaner"}] , )
@unittest.skip("No models are available in TF" )
def A_ ( self : int ) ->Union[str, Any]:
pass
@slow
@require_torch
def A_ ( self : int ) ->str:
        audio_classifier = pipeline(
            task="zero-shot-audio-classification" , model="laion/clap-htsat-unfused" , )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50" )
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio , candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"] )
        self.assertEqual(
            nested_simplify(output ) , [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vacuum cleaner"},
            ] , )
        output = audio_classifier([audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"] )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vacuum cleaner"},
                ],
            ]
            * 5 , )
        output = audio_classifier(
            [audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"] , batch_size=5 )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vacuum cleaner"},
                ],
            ]
            * 5 , )
@unittest.skip("No models are available in TF" )
def A_ ( self : Optional[int] ) ->Union[str, Any]:
pass
| 26 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase :Optional[Any] = logging.get_logger(__name__)
__lowercase :Union[str, Any] = {
"""transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}
class _a ( snake_case_ ):
"""simple docstring"""
snake_case_ = """transfo-xl"""
snake_case_ = ["""mems"""]
snake_case_ = {
"""n_token""": """vocab_size""",
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
    def __init__( self : Optional[Any] , vocab_size : List[str]=26_77_35 , cutoffs : Optional[int]=[2_00_00, 4_00_00, 20_00_00] , d_model : Optional[int]=10_24 , d_embed : Union[str, Any]=10_24 , n_head : Tuple=16 , d_head : List[Any]=64 , d_inner : int=40_96 , div_val : str=4 , pre_lnorm : Union[str, Any]=False , n_layer : List[str]=18 , mem_len : Optional[Any]=16_00 , clamp_len : List[str]=10_00 , same_length : Union[str, Any]=True , proj_share_all_but_first : int=True , attn_type : str=0 , sample_softmax : List[Any]=-1 , adaptive : int=True , dropout : Dict=0.1 , dropatt : Tuple=0.0 , untie_r : Dict=True , init : List[Any]="normal" , init_range : Union[str, Any]=0.01 , proj_init_std : int=0.01 , init_std : List[Any]=0.02 , layer_norm_epsilon : List[Any]=1E-5 , eos_token_id : Tuple=0 , **kwargs : Optional[Any] , ) ->List[str]:
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs )
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs )
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs )
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id , **kwargs )
@property
    def max_position_embeddings( self : List[Any] ) ->Optional[int]:
# Message copied from Transformer-XL documentation
logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
    def max_position_embeddings( self : List[str] , value : Optional[int] ) ->str:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 700 |
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
__lowercase :List[str] = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
__lowercase :str = get_tests_dir("fixtures/vocab.json")
__lowercase :Optional[int] = get_tests_dir("fixtures")
class _a ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def A_ ( self : Optional[Any] ) ->int:
SCREAMING_SNAKE_CASE__ : Dict = 0
def A_ ( self : Any ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
self.assertIsInstance(a , a )
def A_ ( self : Union[str, Any] ) ->List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Dict = WavaVecaConfig()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
# save in new folder
model_config.save_pretrained(a )
processor.save_pretrained(a )
SCREAMING_SNAKE_CASE__ : str = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : int ) ->List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(a , os.path.join(a , a ) )
copyfile(a , os.path.join(a , "vocab.json" ) )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : List[Any] ) ->Tuple:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Optional[Any] = WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE__ : Tuple = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
SCREAMING_SNAKE_CASE__ : Any = WavaVecaProcessor(a , a )
# save in new folder
processor.save_pretrained(a )
# drop `processor_class` in tokenizer
with open(os.path.join(a , a ) , "r" ) as f:
SCREAMING_SNAKE_CASE__ : Optional[int] = json.load(a )
config_dict.pop("processor_class" )
with open(os.path.join(a , a ) , "w" ) as f:
f.write(json.dumps(a ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : List[str] ) ->Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Tuple = WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
SCREAMING_SNAKE_CASE__ : Optional[int] = WavaVecaProcessor(a , a )
# save in new folder
processor.save_pretrained(a )
# drop `processor_class` in feature extractor
with open(os.path.join(a , a ) , "r" ) as f:
SCREAMING_SNAKE_CASE__ : List[Any] = json.load(a )
config_dict.pop("processor_class" )
with open(os.path.join(a , a ) , "w" ) as f:
f.write(json.dumps(a ) )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : Union[str, Any] ) ->str:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : List[Any] = WavaVecaConfig(processor_class="Wav2Vec2Processor" )
model_config.save_pretrained(a )
# copy relevant files
copyfile(a , os.path.join(a , "vocab.json" ) )
            # create empty sample processor
with open(os.path.join(a , a ) , "w" ) as f:
f.write("{}" )
SCREAMING_SNAKE_CASE__ : Tuple = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : Optional[Any] ) ->Optional[int]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(a ):
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(a ):
SCREAMING_SNAKE_CASE__ : Any = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
SCREAMING_SNAKE_CASE__ : Dict = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
SCREAMING_SNAKE_CASE__ : int = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a , use_fast=a )
SCREAMING_SNAKE_CASE__ : List[Any] = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , "NewTokenizer" )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
def A_ ( self : Tuple ) ->List[Any]:
try:
AutoConfig.register("custom" , a )
AutoFeatureExtractor.register(a , a )
AutoTokenizer.register(a , slow_tokenizer_class=a )
AutoProcessor.register(a , a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(a ):
AutoProcessor.register(a , a )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE__ : List[str] = CustomFeatureExtractor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ : int = os.path.join(a , "vocab.txt" )
with open(a , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = CustomTokenizer(a )
SCREAMING_SNAKE_CASE__ : List[Any] = CustomProcessor(a , a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(a )
SCREAMING_SNAKE_CASE__ : Any = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def A_ ( self : Union[str, Any] ) ->int:
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = False
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = False
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "AutoFeatureExtractor"
snake_case_ = "AutoTokenizer"
snake_case_ = False
try:
AutoConfig.register("custom" , a )
AutoFeatureExtractor.register(a , a )
AutoTokenizer.register(a , slow_tokenizer_class=a )
AutoProcessor.register(a , a )
# If remote code is not set, the default is to use local classes.
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
SCREAMING_SNAKE_CASE__ : Tuple = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
SCREAMING_SNAKE_CASE__ : Any = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def A_ ( self : Optional[Any] ) ->Dict:
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(processor.__class__.__name__ , "BertTokenizerFast" )
def A_ ( self : Dict ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Dict = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext" )
self.assertEqual(processor.__class__.__name__ , "ConvNextImageProcessor" )
@is_staging_test
class _a ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def A_ ( cls : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : int = TOKEN
HfFolder.save_token(a )
@classmethod
def A_ ( cls : List[str] ) ->Optional[int]:
try:
delete_repo(token=cls._token , repo_id="test-processor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-processor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-processor" )
except HTTPError:
pass
def A_ ( self : Dict ) ->Dict:
SCREAMING_SNAKE_CASE__ : Tuple = WavaVecaProcessor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(a , "test-processor" ) , push_to_hub=a , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ : Optional[int] = WavaVecaProcessor.from_pretrained(f"""{USER}/test-processor""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(a , getattr(new_processor.feature_extractor , a ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def A_ ( self : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = WavaVecaProcessor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(a , "test-processor-org" ) , push_to_hub=a , use_auth_token=self._token , organization="valid_org" , )
SCREAMING_SNAKE_CASE__ : Dict = WavaVecaProcessor.from_pretrained("valid_org/test-processor-org" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(a , getattr(new_processor.feature_extractor , a ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def A_ ( self : Any ) ->int:
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
SCREAMING_SNAKE_CASE__ : Any = CustomFeatureExtractor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ : Optional[Any] = os.path.join(a , "vocab.txt" )
with open(a , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ : str = CustomTokenizer(a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = CustomProcessor(a , a )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f"""{USER}/test-dynamic-processor""" , token=self._token )
SCREAMING_SNAKE_CASE__ : str = Repository(a , clone_from=f"""{USER}/test-dynamic-processor""" , token=self._token )
processor.save_pretrained(a )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(a , "tokenizer_config.json" ) ) as f:
SCREAMING_SNAKE_CASE__ : str = json.load(a )
self.assertDictEqual(
tokenizer_config["auto_map"] , {
"AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(a , "custom_feature_extraction.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(a , "custom_tokenization.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(a , "custom_processing.py" ) ) )
repo.push_to_hub()
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained(f"""{USER}/test-dynamic-processor""" , trust_remote_code=a )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , "CustomProcessor" )
| 26 | 0 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(ksize: int , sigma: int , theta: int , lambd: int , gamma: int , psi: int ):
    '''simple docstring'''
    # the kernel size has to be odd so the kernel has a well-defined centre
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize) , dtype=np.float32 )
    # each value
    for y in range(ksize ):
        for x in range(ksize ):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta )
            sin_theta = np.sin(_theta )
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
    return gabor
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
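# Each kernel realises g(x, y) = exp(-(x'**2 + gamma**2 * y'**2) / (2 * sigma**2))
# * cos(2 * pi * x' / lambd + psi), where (x', y') is (x, y) rotated by theta;
# summing the six orientations gives a rotation-robust edge map.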
| 701 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = ["image_processor", "tokenizer"]
snake_case_ = "CLIPImageProcessor"
snake_case_ = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__( self : Any , image_processor : List[Any]=None , tokenizer : Any=None , **kwargs : int ) ->int:
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
    def __call__( self : Tuple , text : Tuple=None , images : Union[str, Any]=None , return_tensors : List[str]=None , **kwargs : Optional[Any] ) ->Optional[Any]:
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none." )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self : Optional[int] , *args : Any , **kwargs : List[str] ) ->Any:
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self : Any , *args : Optional[int] , **kwargs : Dict ) ->Any:
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self : List[str] ) ->Union[str, Any]:
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def feature_extractor_class( self : Optional[int] ) ->List[Any]:
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self : Dict ) ->str:
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
| 26 | 0 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model ):
'''simple docstring'''
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer( nn.Module ):
    """simple docstring"""
    def __init__( self : Optional[int] , module : nn.Module , rank : int ) ->Dict:
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features , rank , bias=False ) , nn.Linear(rank , module.out_features , bias=False ) , )
        small_std = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
        nn.init.normal_(self.adapter[0].weight , std=small_std )
        nn.init.zeros_(self.adapter[1].weight )
        self.adapter.to(module.weight.device )
    def forward( self : List[str] , input : Any , *args : int , **kwargs : Tuple ) ->Tuple:
        return self.module(input , *args , **kwargs ) + self.adapter(input )
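# Sketch of the idea: the frozen base layer's output is corrected by a cheap
# trainable low-rank path, e.g.
#   layer = LoRALayer(nn.Linear(16, 16), rank=4)
#   y = layer(torch.randn(1, 16))  # module(x) + adapter(x)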
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _a ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = "bigscience/bloom-1b7"
# Constant values
snake_case_ = 2.1_0_9_6_5_9_5_5_2_6_9_2_5_7_4
snake_case_ = "Hello my name is"
snake_case_ = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
snake_case_ = 10
def A_ ( self : Any ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Tuple = AutoTokenizer.from_pretrained(self.model_name )
class _a ( lowercase__ ):
"""simple docstring"""
def A_ ( self : List[str] ) ->List[str]:
super().setUp()
# Models and tokenizer
SCREAMING_SNAKE_CASE__ : int = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map="auto" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=a , device_map="auto" )
def A_ ( self : str ) ->Optional[Any]:
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def A_ ( self : Any ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_abit.config
self.assertTrue(hasattr(a , "quantization_config" ) )
SCREAMING_SNAKE_CASE__ : List[Any] = config.to_dict()
SCREAMING_SNAKE_CASE__ : List[str] = config.to_diff_dict()
SCREAMING_SNAKE_CASE__ : Optional[int] = config.to_json_string()
def A_ ( self : Optional[Any] ) ->Optional[Any]:
from bitsandbytes.nn import Paramsabit
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_fpaa.get_memory_footprint()
SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def A_ ( self : str ) ->Tuple:
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(a , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def A_ ( self : Optional[Any] ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : str = self.tokenizer(self.input_text , return_tensors="pt" )
SCREAMING_SNAKE_CASE__ : List[str] = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=a ) , self.EXPECTED_OUTPUTS )
def A_ ( self : Optional[Any] ) ->str:
SCREAMING_SNAKE_CASE__ : List[str] = BitsAndBytesConfig()
SCREAMING_SNAKE_CASE__ : Optional[Any] = True
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=a , device_map="auto" )
SCREAMING_SNAKE_CASE__ : int = self.tokenizer(self.input_text , return_tensors="pt" )
SCREAMING_SNAKE_CASE__ : int = model_abit_from_config.generate(
input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=a ) , self.EXPECTED_OUTPUTS )
def A_ ( self : str ) ->Dict:
with self.assertRaises(a ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(a )
def A_ ( self : List[str] ) ->Dict:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = BitsAndBytesConfig()
with self.assertRaises(a ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=a , load_in_abit=a , device_map="auto" , bnb_abit_quant_type="nf4" , )
def A_ ( self : str ) ->Optional[Any]:
with self.assertRaises(a ):
# Tries with `str`
self.model_abit.to("cpu" )
with self.assertRaises(a ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(a ):
# Tries with a `device`
self.model_abit.to(torch.device("cuda:0" ) )
with self.assertRaises(a ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(a ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
SCREAMING_SNAKE_CASE__ : str = self.tokenizer(self.input_text , return_tensors="pt" )
SCREAMING_SNAKE_CASE__ : List[str] = self.model_fpaa.to(torch.floataa )
SCREAMING_SNAKE_CASE__ : Tuple = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
SCREAMING_SNAKE_CASE__ : List[Any] = self.model_fpaa.to("cpu" )
# Check this does not throw an error
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_fpaa.half()
# Check this does not throw an error
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_fpaa.float()
def A_ ( self : str ) ->List[Any]:
SCREAMING_SNAKE_CASE__ : List[Any] = AutoModelForSeqaSeqLM.from_pretrained("t5-small" , load_in_abit=a , device_map="auto" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _a ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def A_ ( cls : Optional[int] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : str = 't5-small'
SCREAMING_SNAKE_CASE__ : Optional[int] = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoTokenizer.from_pretrained(cls.model_name )
SCREAMING_SNAKE_CASE__ : Optional[int] = 'Translate in German: Hello, my dog is cute'
def A_ ( self : Optional[int] ) ->Dict:
gc.collect()
torch.cuda.empty_cache()
def A_ ( self : Any ) ->List[str]:
from transformers import TaForConditionalGeneration
SCREAMING_SNAKE_CASE__ : Any = TaForConditionalGeneration._keep_in_fpaa_modules
SCREAMING_SNAKE_CASE__ : Tuple = None
# test with `t5-small`
SCREAMING_SNAKE_CASE__ : Union[str, Any] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=a , device_map="auto" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
SCREAMING_SNAKE_CASE__ : Any = model.generate(**a )
# test with `flan-t5-small`
SCREAMING_SNAKE_CASE__ : List[Any] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=a , device_map="auto" )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
SCREAMING_SNAKE_CASE__ : Optional[int] = model.generate(**a )
SCREAMING_SNAKE_CASE__ : Any = modules
def A_ ( self : List[str] ) ->Tuple:
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
SCREAMING_SNAKE_CASE__ : Tuple = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=a , device_map="auto" )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
SCREAMING_SNAKE_CASE__ : List[Any] = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model.generate(**a )
# test with `flan-t5-small`
SCREAMING_SNAKE_CASE__ : Any = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=a , device_map="auto" )
SCREAMING_SNAKE_CASE__ : Tuple = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
SCREAMING_SNAKE_CASE__ : Optional[int] = model.generate(**a )
class _a ( lowercase__ ):
"""simple docstring"""
def A_ ( self : Optional[Any] ) ->List[str]:
super().setUp()
# model_name
SCREAMING_SNAKE_CASE__ : Dict = 'bigscience/bloom-560m'
SCREAMING_SNAKE_CASE__ : int = 't5-small'
# Different types of model
SCREAMING_SNAKE_CASE__ : str = AutoModel.from_pretrained(self.model_name , load_in_abit=a , device_map="auto" )
# Sequence classification model
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=a , device_map="auto" )
# CausalLM model
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=a , device_map="auto" )
# Seq2seq model
SCREAMING_SNAKE_CASE__ : str = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=a , device_map="auto" )
def A_ ( self : List[Any] ) ->Optional[int]:
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def A_ ( self : Optional[int] ) ->List[str]:
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class _a ( lowercase__ ):
"""simple docstring"""
def A_ ( self : int ) ->List[Any]:
super().setUp()
def A_ ( self : List[Any] ) ->Dict:
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def A_ ( self : List[str] ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : int = pipeline(
"text-generation" , model=self.model_name , model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
SCREAMING_SNAKE_CASE__ : Optional[int] = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]["generated_text"] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class _a ( lowercase__ ):
"""simple docstring"""
def A_ ( self : Optional[Any] ) ->int:
super().setUp()
def A_ ( self : Optional[Any] ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=a , device_map="balanced" )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
SCREAMING_SNAKE_CASE__ : Optional[int] = self.tokenizer(self.input_text , return_tensors="pt" )
# Second real batch
SCREAMING_SNAKE_CASE__ : Any = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=a ) , self.EXPECTED_OUTPUTS )
class _a ( lowercase__ ):
"""simple docstring"""
def A_ ( self : Union[str, Any] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Tuple = 'facebook/opt-350m'
super().setUp()
def A_ ( self : List[str] ) ->Union[str, Any]:
if version.parse(importlib.metadata.version("bitsandbytes" ) ) < version.parse("0.37.0" ):
return
# Step 1: freeze all parameters
SCREAMING_SNAKE_CASE__ : List[Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=a )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
SCREAMING_SNAKE_CASE__ : Any = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
SCREAMING_SNAKE_CASE__ : List[str] = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(a ) ):
SCREAMING_SNAKE_CASE__ : Optional[int] = LoRALayer(module.q_proj , rank=16 )
SCREAMING_SNAKE_CASE__ : Optional[int] = LoRALayer(module.k_proj , rank=16 )
SCREAMING_SNAKE_CASE__ : Any = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
SCREAMING_SNAKE_CASE__ : List[Any] = self.tokenizer("Test batch " , return_tensors="pt" ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
SCREAMING_SNAKE_CASE__ : Tuple = model.forward(**a )
out.logits.norm().backward()
for module in model.modules():
if isinstance(a , a ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(a , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "gpt2-xl"
snake_case_ = 3.3_1_9_1_8_5_4_8_5_4_1_5_2_1_8_7
| 702 |
import sys
from collections import defaultdict
class Heap:
    """simple docstring"""
    def __init__( self : Any ) ->Dict:
        self.node_position = []
    def get_position( self : int , vertex : List[str] ) ->Dict:
        return self.node_position[vertex]
    def set_position( self : Optional[Any] , vertex : Any , pos : List[str] ) ->Optional[Any]:
        self.node_position[vertex] = pos
    def top_to_bottom( self : List[Any] , heap : List[str] , start : Dict , size : Dict , positions : List[Any] ) ->Optional[int]:
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1
                temp = self.get_position(positions[smallest_child] )
                self.set_position(
                    positions[smallest_child] , self.get_position(positions[start] ) )
                self.set_position(positions[start] , temp )
                self.top_to_bottom(heap , smallest_child , size , positions )
    def bottom_to_top( self : Union[str, Any] , val : Tuple , index : Tuple , heap : Union[str, Any] , position : List[Any] ) ->Optional[int]:
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent] , index )
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp , index )
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp , 0 )
    def heapify( self : Union[str, Any] , heap : int , positions : List[str] ) ->Union[str, Any]:
        start = len(heap ) // 2 - 1
        for i in range(start , -1 , -1 ):
            self.top_to_bottom(heap , i , len(heap ) , positions )
    def delete_minimum( self : Dict , heap : List[Any] , positions : Dict ) ->Optional[int]:
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap , 0 , len(heap ) , positions )
        return temp
def prisms_algorithm( adjacency_list ):
    '''simple docstring'''
    heap = Heap()
    visited = [0] * len(adjacency_list )
    nbr_tv = [-1] * len(adjacency_list )  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list ) ):
        distance_tv.append(sys.maxsize )
        positions.append(vertex )
        heap.node_position.append(vertex )
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv , positions )
    for _ in range(1 , len(adjacency_list ) ):
        vertex = heap.delete_minimum(distance_tv , positions )
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex) )
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor )]
                ):
                    distance_tv[heap.get_position(neighbor )] = distance
                    heap.bottom_to_top(
                        distance , heap.get_position(neighbor ) , distance_tv , positions )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
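# Illustrative cross-check (added; not part of the original file): a compact
# Prim's implementation on the same adjacency-list format, built on the
# standard-library heapq. The names here (prims_reference, etc.) are hypothetical.
import heapq
def prims_reference(adjacency, start=0):
    visited_set = {start}
    candidate_edges = [(weight, start, nbr) for nbr, weight in adjacency[start]]
    heapq.heapify(candidate_edges)
    mst_edges = []
    while candidate_edges and len(visited_set) < len(adjacency):
        weight, u, v = heapq.heappop(candidate_edges)
        if v in visited_set:
            continue  # edge leads back into the tree
        visited_set.add(v)
        mst_edges.append((u, v))
        for nbr, w in adjacency[v]:
            if nbr not in visited_set:
                heapq.heappush(candidate_edges, (w, v, nbr))
    return mst_edges
# For a connected graph, prims_reference(adjacency_list) should match the edge
# set produced by prisms_algorithm above, up to edge orientation and tie-breaking.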
| 26 | 0 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing( search_prob , find_max : bool = True , max_x : float = math.inf , min_x : float = -math.inf , max_y : float = math.inf , min_y : float = -math.inf , visualization : bool = False , start_temperate : float = 100 , rate_of_decrease : float = 0.0_1 , threshold_temp : float = 1 , ):
'''simple docstring'''
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score )
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
            index = random.randint(0 , len(neighbors ) - 1 )  # picking a random neighbor
            picked_neighbor = neighbors.pop(index )
            change = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
if visualization:
from matplotlib import pyplot as plt
        plt.plot(range(iterations ) , scores )
plt.xlabel("Iterations" )
plt.ylabel("Function values" )
plt.show()
return best_state
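# Note (added for clarity): the acceptance rule above is the Metropolis
# criterion -- a move that worsens the score by `change` (< 0) at temperature T
# is still taken with probability e**(change / T). For example,
# math.e ** (-50 / 100) is about 0.607, while math.e ** (-50 / 1) is about
# 1.9e-22: high temperatures explore, low temperatures exploit.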
if __name__ == "__main__":
    def test_fa( x , y ):
'''simple docstring'''
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > -5 found via simulated annealing: {local_min.score()}"
    )
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > -5 found via simulated annealing: {local_max.score()}"
    )
    def test_fa( x , y ):
'''simple docstring'''
return (3 * x**2) - (6 * y)
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_max.score()}"
    )
| 703 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
__lowercase :List[Any] = logging.get_logger(__name__)
__lowercase :Optional[int] = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class LongformerConfig ( PretrainedConfig ):
"""simple docstring"""
snake_case_ = "longformer"
    def __init__( self , attention_window : Union[List[int], int] = 5_12 , sep_token_id : int = 2 , pad_token_id : int = 1 , bos_token_id : int = 0 , eos_token_id : int = 2 , vocab_size : int = 3_05_22 , hidden_size : int = 7_68 , num_hidden_layers : int = 12 , num_attention_heads : int = 12 , intermediate_size : int = 30_72 , hidden_act : str = "gelu" , hidden_dropout_prob : float = 0.1 , attention_probs_dropout_prob : float = 0.1 , max_position_embeddings : int = 5_12 , type_vocab_size : int = 2 , initializer_range : float = 0.02 , layer_norm_eps : float = 1E-12 , onnx_export : bool = False , **kwargs , ) ->Tuple:
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig ( OnnxConfig ):
    """simple docstring"""
    def __init__( self , config : "PretrainedConfig" , task : str = "default" , patching_specs : "List[PatchingSpec]" = None ) ->str:
        super().__init__(config , task , patching_specs )
        config.onnx_export = True
    @property
    def inputs( self ) ->Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("global_attention_mask", dynamic_axis),
] )
@property
    def outputs( self ) ->Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
return outputs
@property
    def atol_for_validation( self ) ->float:
        return 1E-4
    @property
    def default_onnx_opset( self ) ->int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset , 14 )
    def generate_dummy_inputs( self , tokenizer : "PreTrainedTokenizerBase" , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional[TensorType] = None , ) ->Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"] )
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1
return inputs
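# Usage sketch (added; illustrative only). A per-layer attention window can be
# passed to the config class above as a list:
#
#   config = LongformerConfig(attention_window=[2_56] * 12, vocab_size=3_05_22)
#
# The dummy-input logic above mirrors real usage: Longformer expects a
# global_attention_mask marking the few tokens that attend globally, with all
# remaining tokens restricted to local sliding-window attention.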
| 26 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
__lowercase :Union[str, Any] = logging.getLogger(__name__)
@dataclass
class ModelArguments :
"""simple docstring"""
snake_case_ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
snake_case_ = field(
default=__lowercase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
snake_case_ = field(
default=__lowercase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
snake_case_ = field(
default=__lowercase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
snake_case_ = field(
default=__lowercase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
snake_case_ = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
snake_case_ = field(
default=__lowercase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
@dataclass
class DataTrainingArguments :
"""simple docstring"""
snake_case_ = field(default=__lowercase , metadata={"help": "The input training data file (a text file)."} )
snake_case_ = field(
default=__lowercase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
snake_case_ = field(
default=__lowercase , metadata={"help": "Overwrite the cached training and evaluation sets"} )
snake_case_ = field(
default=__lowercase , metadata={"help": "The number of processes to use for the preprocessing."} , )
snake_case_ = field(
default=__lowercase , metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
snake_case_ = field(
default=__lowercase , metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
} , )
snake_case_ = field(
default=__lowercase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
snake_case_ = field(
default=__lowercase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
    def __post_init__( self ) ->Any:
if self.train_file is not None:
SCREAMING_SNAKE_CASE__ : str = self.train_file.split("." )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
SCREAMING_SNAKE_CASE__ : List[str] = self.validation_file.split("." )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice :
"""simple docstring"""
snake_case_ = 42
snake_case_ = True
snake_case_ = None
snake_case_ = None
    def __call__( self , features ) ->str:
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name ) for feature in features]
        batch_size = len(features )
        num_choices = len(features[0]["input_ids"] )
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices )] for feature in features
        ]
        flattened_features = list(chain(*flattened_features ) )
        batch = self.tokenizer.pad(
            flattened_features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
        # Un-flatten
        batch = {k: v.view(batch_size , num_choices , -1 ) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels , dtype=torch.int64 )
return batch
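    # Shape sketch (added): with batch_size=2 and 4 endings per example,
    # flattened_features holds 8 feature dicts; after tokenizer.pad the tensors
    # are (8, seq_len), and the .view(batch_size, num_choices, -1) above
    # restores the (batch, num_choices, seq_len) layout that a multiple-choice
    # head expects.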
def main( ):
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_swag" , __snake_case , __snake_case )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = training_args.get_process_log_level()
logger.setLevel(__snake_case )
datasets.utils.logging.set_verbosity(__snake_case )
transformers.utils.logging.set_verbosity(__snake_case )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
SCREAMING_SNAKE_CASE__ : Tuple = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
SCREAMING_SNAKE_CASE__ : Optional[int] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {}
if data_args.train_file is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] = data_args.train_file
if data_args.validation_file is not None:
SCREAMING_SNAKE_CASE__ : Any = data_args.validation_file
SCREAMING_SNAKE_CASE__ : int = data_args.train_file.split("." )[-1]
SCREAMING_SNAKE_CASE__ : str = load_dataset(
__snake_case , data_files=__snake_case , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
SCREAMING_SNAKE_CASE__ : Dict = load_dataset(
"swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE__ : int = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
SCREAMING_SNAKE_CASE__ : str = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
SCREAMING_SNAKE_CASE__ : Any = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=__snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
SCREAMING_SNAKE_CASE__ : List[Any] = [f"""ending{i}""" for i in range(4 )]
SCREAMING_SNAKE_CASE__ : Optional[int] = "sent1"
SCREAMING_SNAKE_CASE__ : Optional[int] = "sent2"
if data_args.max_seq_length is None:
SCREAMING_SNAKE_CASE__ : str = tokenizer.model_max_length
if max_seq_length > 1_024:
logger.warning(
"The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
" override this default with `--block_size xxx`." )
SCREAMING_SNAKE_CASE__ : Tuple = 1_024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(_lowerCamelCase : List[Any] ):
SCREAMING_SNAKE_CASE__ : str = [[context] * 4 for context in examples[context_name]]
SCREAMING_SNAKE_CASE__ : Optional[int] = examples[question_header_name]
SCREAMING_SNAKE_CASE__ : Optional[Any] = [
[f"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(__snake_case )
]
# Flatten out
SCREAMING_SNAKE_CASE__ : Optional[int] = list(chain(*__snake_case ) )
SCREAMING_SNAKE_CASE__ : Tuple = list(chain(*__snake_case ) )
# Tokenize
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer(
__snake_case , __snake_case , truncation=__snake_case , max_length=__snake_case , padding="max_length" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(__snake_case ) , 4 )] for k, v in tokenized_examples.items()}
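    # Shape note (added): each SWAG example yields 4 (context, "header ending_i")
    # pairs; the final comprehension regroups the flat tokenized list back into
    # rows of 4, so every dataset row keeps shape (4, seq_len) per tokenizer field.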
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
SCREAMING_SNAKE_CASE__ : List[Any] = raw_datasets["train"]
if data_args.max_train_samples is not None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = min(len(__snake_case ) , data_args.max_train_samples )
SCREAMING_SNAKE_CASE__ : Dict = train_dataset.select(range(__snake_case ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
SCREAMING_SNAKE_CASE__ : List[str] = train_dataset.map(
__snake_case , batched=__snake_case , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
SCREAMING_SNAKE_CASE__ : Dict = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
SCREAMING_SNAKE_CASE__ : Dict = min(len(__snake_case ) , data_args.max_eval_samples )
SCREAMING_SNAKE_CASE__ : Tuple = eval_dataset.select(range(__snake_case ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
SCREAMING_SNAKE_CASE__ : List[Any] = eval_dataset.map(
__snake_case , batched=__snake_case , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
SCREAMING_SNAKE_CASE__ : Dict = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=__snake_case , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(_lowerCamelCase : str ):
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = eval_predictions
SCREAMING_SNAKE_CASE__ : Tuple = np.argmax(__snake_case , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Trainer(
model=__snake_case , args=__snake_case , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=__snake_case , data_collator=__snake_case , compute_metrics=__snake_case , )
# Training
if training_args.do_train:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
if training_args.resume_from_checkpoint is not None:
SCREAMING_SNAKE_CASE__ : List[Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] = last_checkpoint
SCREAMING_SNAKE_CASE__ : Optional[int] = trainer.train(resume_from_checkpoint=__snake_case )
trainer.save_model() # Saves the tokenizer too for easy upload
SCREAMING_SNAKE_CASE__ : str = train_result.metrics
SCREAMING_SNAKE_CASE__ : str = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__snake_case )
)
SCREAMING_SNAKE_CASE__ : int = min(__snake_case , len(__snake_case ) )
trainer.log_metrics("train" , __snake_case )
trainer.save_metrics("train" , __snake_case )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
SCREAMING_SNAKE_CASE__ : Optional[int] = trainer.evaluate()
SCREAMING_SNAKE_CASE__ : Tuple = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__snake_case )
SCREAMING_SNAKE_CASE__ : int = min(__snake_case , len(__snake_case ) )
trainer.log_metrics("eval" , __snake_case )
trainer.save_metrics("eval" , __snake_case )
SCREAMING_SNAKE_CASE__ : Optional[int] = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "multiple-choice",
"dataset_tags": "swag",
"dataset_args": "regular",
"dataset": "SWAG",
"language": "en",
}
if training_args.push_to_hub:
trainer.push_to_hub(**__snake_case )
else:
trainer.create_model_card(**__snake_case )
def _mp_fn( index ):
    '''simple docstring'''
    main()
if __name__ == "__main__":
main()
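# Example invocation (added; illustrative -- flags follow the standard
# transformers example-script conventions):
#
#   python run_swag.py \
#     --model_name_or_path bert-base-uncased \
#     --do_train --do_eval \
#     --output_dir /tmp/swag_out \
#     --per_device_train_batch_size 16 \
#     --learning_rate 5e-5 \
#     --num_train_epochs 3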
| 704 |
def solution( n : int = 4_000_000 ):
    '''simple docstring'''
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1] )
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
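# Equivalent closed-form variant (added; illustrative). Every third Fibonacci
# number is even, so the even ones satisfy E(k) = 4*E(k-1) + E(k-2) with
# E(1) = 2, E(2) = 8, and can be summed directly:
def solution_even_only( n : int = 4_000_000 ):
    '''simple docstring'''
    a, b, total = 2, 8, 0
    while a <= n:
        total += a
        a, b = b, 4 * b + a
    return total
# Both functions should return 4_613_732 for the default bound (the published
# Project Euler #2 answer).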
if __name__ == "__main__":
print(f"{solution() = }")
| 26 | 0 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _a ( __a ):
"""simple docstring"""
snake_case_ = ['''image_processor''', '''tokenizer''']
snake_case_ = '''BlipImageProcessor'''
snake_case_ = '''AutoTokenizer'''
    def __init__( self , image_processor , tokenizer ) ->int:
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , images : ImageInput = None , text : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , add_special_tokens : bool = True , padding : Union[bool, str, PaddingStrategy] = False , truncation : Union[bool, str, TruncationStrategy] = None , max_length : Optional[int] = None , stride : int = 0 , pad_to_multiple_of : Optional[int] = None , return_attention_mask : Optional[bool] = None , return_overflowing_tokens : bool = False , return_special_tokens_mask : bool = False , return_offsets_mapping : bool = False , return_token_type_ids : bool = False , return_length : bool = False , verbose : bool = True , return_tensors : Optional[Union[str, TensorType]] = None , **kwargs , ) ->Tuple:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text." )
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding )
return encoding_image_processor
    def batch_decode( self , *args , **kwargs ) ->Tuple:
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ) ->Optional[int]:
        return self.tokenizer.decode(*args , **kwargs )
    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names( self ) ->Dict:
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
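# Usage sketch (added; illustrative). Assuming the class above wraps a BLIP
# image processor and an autoregressive tokenizer (variable names hypothetical):
#
#   processor = _a(image_processor, tokenizer)   # class name as defined above
#   inputs = processor(images=pil_image, text="a photo of", return_tensors="pt")
#   # -> BatchEncoding-like dict with "pixel_values" plus the tokenizer fields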
| 705 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[int] , a : Any , a : bool = True , a : Dict[str, int] = None , a : int = 32 , a : bool = True , a : Union[int, float] = 1 / 2_55 , a : bool = True , a : bool = True , a : Optional[Union[float, List[float]]] = [0.4814_5466, 0.457_8275, 0.4082_1073] , a : Optional[Union[float, List[float]]] = [0.2686_2954, 0.2613_0258, 0.2757_7711] , a : bool = True , a : Any=7 , a : str=30 , a : Dict=4_00 , a : Optional[int]=3 , ) ->int:
SCREAMING_SNAKE_CASE__ : int = parent
SCREAMING_SNAKE_CASE__ : Dict = do_resize
SCREAMING_SNAKE_CASE__ : List[str] = size if size is not None else {"shortest_edge": 2_88}
SCREAMING_SNAKE_CASE__ : List[Any] = size_divisor
SCREAMING_SNAKE_CASE__ : List[Any] = do_rescale
SCREAMING_SNAKE_CASE__ : Tuple = rescale_factor
SCREAMING_SNAKE_CASE__ : Optional[int] = do_normalize
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[int] = image_mean
SCREAMING_SNAKE_CASE__ : Dict = image_std
SCREAMING_SNAKE_CASE__ : List[str] = do_pad
SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE__ : int = num_channels
SCREAMING_SNAKE_CASE__ : Optional[int] = min_resolution
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max_resolution
def A_ ( self : List[str] ) ->Tuple:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def A_ ( self : int , a : Optional[int] , a : Union[str, Any]=False ) ->Optional[Any]:
if not batched:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.size["shortest_edge"]
SCREAMING_SNAKE_CASE__ : Dict = image_inputs[0]
if isinstance(a , Image.Image ):
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = image.size
else:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[str] = image.shape[1], image.shape[2]
SCREAMING_SNAKE_CASE__ : Any = size / min(a , a )
if h < w:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = size, scale * w
else:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[str] = scale * h, size
SCREAMING_SNAKE_CASE__ : List[Any] = int((13_33 / 8_00) * size )
if max(a , a ) > max_size:
SCREAMING_SNAKE_CASE__ : List[Any] = max_size / max(a , a )
SCREAMING_SNAKE_CASE__ : int = newh * scale
SCREAMING_SNAKE_CASE__ : Optional[int] = neww * scale
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = int(newh + 0.5 ), int(neww + 0.5 )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
SCREAMING_SNAKE_CASE__ : List[Any] = []
for image in image_inputs:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE__ : Tuple = max(a , key=lambda a : item[0] )[0]
SCREAMING_SNAKE_CASE__ : Tuple = max(a , key=lambda a : item[1] )[1]
return expected_height, expected_width
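    # Worked example (added): with shortest_edge=288 and size_divisor=32, a
    # 400x600 image scales its short side to 288 and its long side to 432; the
    # cap int(1333 / 800 * 288) = 479 is not exceeded, and flooring both sides
    # to multiples of 32 gives an expected output of 288 x 416.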
@require_torch
@require_vision
class BridgeTowerImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
"""simple docstring"""
snake_case_ = BridgeTowerImageProcessor if is_vision_available() else None
def A_ ( self : List[Any] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Any = BridgeTowerImageProcessingTester(self )
@property
def A_ ( self : Optional[int] ) ->Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def A_ ( self : Tuple ) ->str:
SCREAMING_SNAKE_CASE__ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , "image_mean" ) )
self.assertTrue(hasattr(a , "image_std" ) )
self.assertTrue(hasattr(a , "do_normalize" ) )
self.assertTrue(hasattr(a , "do_resize" ) )
self.assertTrue(hasattr(a , "size" ) )
self.assertTrue(hasattr(a , "size_divisor" ) )
def A_ ( self : List[Any] ) ->List[Any]:
pass
def A_ ( self : Tuple ) ->Optional[Any]:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : int = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : Optional[int] ) ->Any:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Tuple = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : str ) ->Optional[int]:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Any = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 26 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase :int = logging.get_logger(__name__)
__lowercase :str = {
"BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json",
"BridgeTower/bridgetower-base-itm-mlm": (
"https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"
),
}
class BridgeTowerVisionConfig ( PretrainedConfig ):
"""simple docstring"""
snake_case_ = '''bridgetower_vision_model'''
def __init__( self : Any , a : Any=7_68 , a : Dict=12 , a : List[Any]=3 , a : int=16 , a : Tuple=2_88 , a : Any=1 , a : int=1E-05 , a : Optional[int]=False , a : Any=True , a : List[str]=False , **a : Any , ) ->List[str]:
super().__init__(**a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : List[str] = num_channels
SCREAMING_SNAKE_CASE__ : Tuple = patch_size
SCREAMING_SNAKE_CASE__ : List[str] = image_size
SCREAMING_SNAKE_CASE__ : Any = initializer_factor
SCREAMING_SNAKE_CASE__ : Any = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Dict = stop_gradient
SCREAMING_SNAKE_CASE__ : Optional[Any] = share_layernorm
SCREAMING_SNAKE_CASE__ : Union[str, Any] = remove_last_layer
@classmethod
def A_ ( cls : int , a : List[Any] , **a : Union[str, Any] ) ->str:
SCREAMING_SNAKE_CASE__ : str = cls.get_config_dict(a_ , **a_ )
if config_dict.get("model_type" ) == "bridgetower":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(a_ , **a_ )
class BridgeTowerTextConfig ( PretrainedConfig ):
"""simple docstring"""
snake_case_ = '''bridgetower_text_model'''
def __init__( self : List[Any] , a : Union[str, Any]=5_02_65 , a : Any=7_68 , a : Tuple=12 , a : List[Any]=12 , a : Optional[Any]=1 , a : int=30_72 , a : Any="gelu" , a : List[str]=0.1 , a : Any=0.1 , a : Any=5_14 , a : Union[str, Any]=1 , a : Tuple=1E-05 , a : Optional[Any]=1 , a : str=0 , a : Optional[Any]=2 , a : Optional[Any]="absolute" , a : List[str]=True , **a : Optional[int] , ) ->Any:
super().__init__(**a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = vocab_size
SCREAMING_SNAKE_CASE__ : List[str] = hidden_size
SCREAMING_SNAKE_CASE__ : str = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_act
SCREAMING_SNAKE_CASE__ : Tuple = initializer_factor
SCREAMING_SNAKE_CASE__ : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE__ : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : str = max_position_embeddings
SCREAMING_SNAKE_CASE__ : str = type_vocab_size
SCREAMING_SNAKE_CASE__ : Any = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Dict = position_embedding_type
SCREAMING_SNAKE_CASE__ : Optional[int] = use_cache
SCREAMING_SNAKE_CASE__ : Any = pad_token_id
SCREAMING_SNAKE_CASE__ : Optional[int] = bos_token_id
SCREAMING_SNAKE_CASE__ : Tuple = eos_token_id
@classmethod
def A_ ( cls : Tuple , a : List[str] , **a : List[str] ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : int = cls.get_config_dict(a_ , **a_ )
if config_dict.get("model_type" ) == "bridgetower":
SCREAMING_SNAKE_CASE__ : List[Any] = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(a_ , **a_ )
class BridgeTowerConfig ( PretrainedConfig ):
"""simple docstring"""
snake_case_ = '''bridgetower'''
def __init__( self : Any , a : List[Any]=True , a : Tuple="gelu" , a : str=7_68 , a : Dict=1 , a : Dict=1E-05 , a : List[str]=False , a : Union[str, Any]="add" , a : List[Any]=12 , a : int=6 , a : Union[str, Any]=False , a : int=False , a : Tuple=None , a : List[str]=None , **a : str , ) ->Tuple:
# TODO: remove this once the Hub files are updated.
SCREAMING_SNAKE_CASE__ : Tuple = kwargs.pop("text_config_dict" , a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = kwargs.pop("vision_config_dict" , a_ )
super().__init__(**a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = share_cross_modal_transformer_layers
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = initializer_factor
SCREAMING_SNAKE_CASE__ : Any = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Optional[Any] = share_link_tower_layers
SCREAMING_SNAKE_CASE__ : Optional[Any] = link_tower_type
SCREAMING_SNAKE_CASE__ : Any = num_attention_heads
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : int = tie_word_embeddings
SCREAMING_SNAKE_CASE__ : Optional[Any] = init_layernorm_from_vision_encoder
if text_config is None:
SCREAMING_SNAKE_CASE__ : int = {}
logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values." )
if vision_config is None:
SCREAMING_SNAKE_CASE__ : Tuple = {}
logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values." )
SCREAMING_SNAKE_CASE__ : Tuple = BridgeTowerTextConfig(**a_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = BridgeTowerVisionConfig(**a_ )
@classmethod
def A_ ( cls : List[Any] , a : Dict , a : List[Any] , **a : Optional[Any] ) ->Optional[int]:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **a_ )
def A_ ( self : str ) ->List[str]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE__ : List[str] = self.text_config.to_dict()
SCREAMING_SNAKE_CASE__ : Dict = self.vision_config.to_dict()
SCREAMING_SNAKE_CASE__ : str = self.__class__.model_type
return output
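# Usage sketch (added; illustrative): the composite config nests the two
# sub-configs, so in the upstream implementation a default instance serializes
# with separable sub-dicts:
#
#   cfg = BridgeTowerConfig()   # text/vision sub-configs default-initialized
#   d = cfg.to_dict()           # includes the nested text/vision config dicts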
| 706 |
def miller_rabin( n : int , allow_probable : bool = False ):
'''simple docstring'''
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
raise ValueError(
"Warning: upper bound of deterministic test is exceeded. "
"Pass allow_probable=True to allow probabilistic test. "
"A return value of True indicates a probable prime." )
# array bounds provided by analysis
    bounds = [
2_047,
1_373_653,
25_326_001,
3_215_031_751,
2_152_302_898_747,
3_474_749_660_383,
341_550_071_728_321,
1,
3_825_123_056_546_413_051,
1,
1,
318_665_857_834_031_151_167_461,
3_317_044_064_679_887_385_961_981,
]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds , 1 ):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n -1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s ):
            m = pow(prime , d * 2**r , n )
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
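# Quick illustration (added): 561 = 3 * 11 * 17 is the smallest Carmichael
# number, so it fools the plain Fermat test for every coprime base, yet base 2
# already exposes it here: 560 = 2**4 * 35, pow(2, 35, 561) = 263, and repeated
# squaring gives 166, 67, 1 -- never 560 -- so 561 is correctly reported composite.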
def test_miller_rabin( ):
'''simple docstring'''
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838_201 )
assert miller_rabin(838_207 )
# 1_373_653
assert not miller_rabin(17_316_001 )
assert miller_rabin(17_316_017 )
# 25_326_001
assert not miller_rabin(3_078_386_641 )
assert miller_rabin(3_078_386_653 )
# 3_215_031_751
assert not miller_rabin(1_713_045_574_801 )
assert miller_rabin(1_713_045_574_819 )
# 2_152_302_898_747
assert not miller_rabin(2_779_799_728_307 )
assert miller_rabin(2_779_799_728_327 )
# 3_474_749_660_383
assert not miller_rabin(113_850_023_909_441 )
assert miller_rabin(113_850_023_909_527 )
# 341_550_071_728_321
assert not miller_rabin(1_275_041_018_848_804_351 )
assert miller_rabin(1_275_041_018_848_804_391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79_666_464_458_507_787_791_867 )
assert miller_rabin(79_666_464_458_507_787_791_951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552_840_677_446_647_897_660_333 )
assert miller_rabin(552_840_677_446_647_897_660_359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 26 | 0 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class _a ( unittest.TestCase ):
"""simple docstring"""
@parameterized.expand([(None,), ("foo.json",)] )
def A_ ( self : Optional[int] , a : str ) ->List[Any]:
SCREAMING_SNAKE_CASE__ : Dict = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(a , config_name=a )
SCREAMING_SNAKE_CASE__ : List[str] = GenerationConfig.from_pretrained(a , config_name=a )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , a )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , a )
def A_ ( self : Union[str, Any] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoConfig.from_pretrained("gpt2" )
SCREAMING_SNAKE_CASE__ : List[Any] = GenerationConfig.from_model_config(a )
SCREAMING_SNAKE_CASE__ : Any = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(a , a )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def A_ ( self : Optional[Any] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] = GenerationConfig()
SCREAMING_SNAKE_CASE__ : Any = {
"max_new_tokens": 10_24,
"foo": "bar",
}
SCREAMING_SNAKE_CASE__ : str = copy.deepcopy(a )
SCREAMING_SNAKE_CASE__ : List[Any] = generation_config.update(**a )
# update_kwargs was not modified (no side effects)
self.assertEqual(a , a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 10_24 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(a , {"foo": "bar"} )
def A_ ( self : Dict ) ->List[str]:
SCREAMING_SNAKE_CASE__ : str = GenerationConfig()
SCREAMING_SNAKE_CASE__ : Dict = "bar"
with tempfile.TemporaryDirectory("test-generation-config" ) as tmp_dir:
generation_config.save_pretrained(a )
SCREAMING_SNAKE_CASE__ : List[str] = GenerationConfig.from_pretrained(a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , "bar" )
SCREAMING_SNAKE_CASE__ : List[Any] = GenerationConfig.from_model_config(a )
assert not hasattr(a , "foo" ) # no new kwargs should be initialized if from config
def A_ ( self : List[Any] ) ->int:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , a )
self.assertEqual(default_config.num_beams , 1 )
SCREAMING_SNAKE_CASE__ : List[str] = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , a )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(a )
SCREAMING_SNAKE_CASE__ : List[Any] = GenerationConfig.from_pretrained(a , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , a )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class _a ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def A_ ( cls : int ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = TOKEN
HfFolder.save_token(a )
@classmethod
def A_ ( cls : Any ) ->List[str]:
try:
delete_repo(token=cls._token , repo_id="test-generation-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-generation-config-org" )
except HTTPError:
pass
def A_ ( self : List[Any] ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("test-generation-config" , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ : int = GenerationConfig.from_pretrained(f"""{USER}/test-generation-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-generation-config" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
a , repo_id="test-generation-config" , push_to_hub=a , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ : Tuple = GenerationConfig.from_pretrained(f"""{USER}/test-generation-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
def A_ ( self : Tuple ) ->Any:
SCREAMING_SNAKE_CASE__ : int = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("valid_org/test-generation-config-org" , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ : int = GenerationConfig.from_pretrained("valid_org/test-generation-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-generation-config-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
a , repo_id="valid_org/test-generation-config-org" , push_to_hub=a , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ : List[Any] = GenerationConfig.from_pretrained("valid_org/test-generation-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
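# Usage sketch (added; illustrative): outside of tests, the same round-trip is
#
#   gen_cfg = GenerationConfig(do_sample=True, temperature=0.7)
#   gen_cfg.save_pretrained("my_model_dir")
#   reloaded = GenerationConfig.from_pretrained("my_model_dir")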
| 707 |
import numpy
class TwoHiddenLayerNeuralNetwork :
    """simple docstring"""
    def __init__( self , input_array : numpy.ndarray , output_array : numpy.ndarray ) ->None:
        self.input_array = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1] , 4 )
        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(
            4 , 3 )
        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3 , 1 )
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape )
    def feedforward( self ) ->numpy.ndarray:
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
    def back_propagation( self ) ->None:
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output ) , )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"""Iteration {iteration} Loss: {loss}""")
    def predict(self, input_arr: numpy.ndarray) -> int:
        # Input values for which the prediction is to be made.
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    '''simple docstring'''
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    '''simple docstring'''
    return (value) * (1 - (value))
def example() -> int:
    '''simple docstring'''
    # Input values.
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
example()
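# --- Illustrative sketch (added; not part of the original file) ---
# sigmoid_derivative expects the *sigmoid output* as its argument: for
# s = sigmoid(x), the derivative ds/dx equals s * (1 - s).
if __name__ == "__main__":
    s = sigmoid(numpy.array([0.0]))  # sigmoid(0) == 0.5
    assert sigmoid_derivative(s)[0] == 0.25  # 0.5 * (1 - 0.5)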
| 26 | 0 |
def solution(n: int = 1_000) -> int:
    '''simple docstring'''
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
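# Worked example (added as an illustrative check): for n = 5 the sum is
# 2*3*1 + 2*4*1 + 2*5*2 = 6 + 8 + 20 = 34.
if __name__ == "__main__":
    assert solution(5) == 34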
| 708 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__lowercase :Tuple = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
__lowercase :str = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
__lowercase :List[Any] = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
"""simple docstring"""
    def _info( self ) ->MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
    def _compute( self , predictions : List[List[str]] , references : List[List[List[str]]] , min_len : int = 1 , max_len : int = 4 , ) ->Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references , hypotheses=predictions , min_len=min_len , max_len=max_len )
}
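# Illustrative sketch (added): the metric above is a thin wrapper around
# nltk's corpus_gleu; an exact hypothesis/reference match scores 1.0.
if __name__ == "__main__":
    tokens = ["the", "cat", "sat"]
    assert gleu_score.corpus_gleu(list_of_references=[[tokens]], hypotheses=[tokens]) == 1.0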
| 26 | 0 |
from ...processing_utils import ProcessorMixin
class WhisperProcessor(ProcessorMixin):
    """simple docstring"""

    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text: str, return_tensors="np"):
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
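# Illustrative usage sketch (added; assumes network access to the
# "openai/whisper-tiny" checkpoint and a `waveform` array you provide):
#
#     from transformers import WhisperProcessor
#     processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
#     inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")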
| 709 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__lowercase :List[Any] = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg, hint=None):
    '''simple docstring'''
    require_version(deps[pkg], hint)
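# Illustrative sketch (added): `require_version` takes a pip-style specifier
# plus an optional hint that is shown when the check fails, e.g.:
#
#     require_version("tokenizers>=0.11.1", "pip install -U tokenizers")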
| 26 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
'''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrOCRForCausalLM''',
'''TrOCRPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
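# Illustrative note (added): with the _LazyModule registration above,
# `from transformers import TrOCRProcessor` resolves the name immediately but
# only imports the heavy submodule on first attribute access.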
| 710 |
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    '''simple docstring'''
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array = [randint(-1_000, 1_000) for i in range(100)]
    k = randint(0, 110)
print(f"The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}")
| 26 | 0 |
def bubble_sort(list_data: list, length: int = 0) -> list:
    '''simple docstring'''
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
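# Worked example (added as an illustrative check): each recursive pass bubbles
# the largest remaining element toward the end until a pass makes no swap.
if __name__ == "__main__":
    assert bubble_sort([3, 1, 2]) == [1, 2, 3]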
| 711 |
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    '''simple docstring'''
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
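# Worked example (added as an illustrative check): divide-and-conquer maximum
# over the inclusive index range [left, right].
if __name__ == "__main__":
    assert find_max([1, 5, 3], 0, 2) == 5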
| 26 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :Any = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
"MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
"MvpForCausalLM",
"MvpForConditionalGeneration",
"MvpForQuestionAnswering",
"MvpForSequenceClassification",
"MvpModel",
"MvpPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 712 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__lowercase :str = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset( IterableDataset ):
"""simple docstring"""
    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies
    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generates commented code ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class EndOfFunctionCriteria( StoppingCriteria ):
"""simple docstring"""
    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer
    def __call__(self, input_ids, scores, **kwargs):
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    '''simple docstring'''
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    '''simple docstring'''
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)
    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = HfArgumentParser(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
SCREAMING_SNAKE_CASE__ : List[str] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
SCREAMING_SNAKE_CASE__ : str = "false"
if args.num_workers is None:
SCREAMING_SNAKE_CASE__ : Dict = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
SCREAMING_SNAKE_CASE__ : Dict = Accelerator()
set_seed(args.seed , device_specific=_lowerCamelCase )
# Load model and tokenizer
SCREAMING_SNAKE_CASE__ : Any = AutoTokenizer.from_pretrained(args.model_ckpt )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.eos_token
SCREAMING_SNAKE_CASE__ : List[str] = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
SCREAMING_SNAKE_CASE__ : List[Any] = {
"do_sample": args.do_sample,
"temperature": args.temperature,
"max_new_tokens": args.max_new_tokens,
"top_p": args.top_p,
"top_k": args.top_k,
"stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowerCamelCase , _lowerCamelCase )] ),
}
# Load evaluation dataset and metric
SCREAMING_SNAKE_CASE__ : str = load_dataset("openai_humaneval" )
SCREAMING_SNAKE_CASE__ : Any = load_metric("code_eval" )
SCREAMING_SNAKE_CASE__ : Dict = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = args.n_samples // args.batch_size
SCREAMING_SNAKE_CASE__ : Dict = TokenizedDataset(_lowerCamelCase , human_eval["test"] , n_copies=_lowerCamelCase , n_tasks=_lowerCamelCase )
# do not confuse args.batch_size, which is actually the num_return_sequences
SCREAMING_SNAKE_CASE__ : Optional[int] = DataLoader(_lowerCamelCase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
SCREAMING_SNAKE_CASE__ : int = code_eval_metric.compute(references=[""] , predictions=[[""]] )
except ValueError as exception:
print(
"Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
" flag to enable code evaluation." )
raise exception
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = accelerator.prepare(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Tuple = complete_code(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , n_tasks=_lowerCamelCase , batch_size=args.batch_size , **_lowerCamelCase , )
if accelerator.is_main_process:
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for task in tqdm(range(_lowerCamelCase ) ):
SCREAMING_SNAKE_CASE__ : List[Any] = human_eval["test"][task]["test"]
SCREAMING_SNAKE_CASE__ : List[Any] = f"""check({human_eval['test'][task]['entry_point']})"""
references.append("\n" + test_func + "\n" + entry_point )
# Evaluate completions with "code_eval" metric
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = code_eval_metric.compute(
references=_lowerCamelCase , predictions=_lowerCamelCase , num_workers=args.num_workers )
print(f"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file , "w" ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 26 | 0 |
from math import asin, atan, cos, radians, sin, sqrt, tan

AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
RADIUS = 6_378_137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    '''simple docstring'''
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_2) * cos(phi_1) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
if __name__ == "__main__":
import doctest
doctest.testmod()
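# Worked example (added as an illustrative check): identical start and end
# coordinates give a distance of exactly 0.0 metres.
if __name__ == "__main__":
    assert haversine_distance(37.774856, -122.424227, 37.774856, -122.424227) == 0.0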
| 713 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_upernet"] = [
"UperNetForSemanticSegmentation",
"UperNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 | 0 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
"""simple docstring"""
def __init__( self : int , a : str , a : Optional[Any]=2 , a : Dict=True , a : Optional[int]=False , a : Any=10 , a : str=3 , a : Tuple=32 * 8 , a : Optional[Any]=32 * 8 , a : str=4 , a : Tuple=64 , ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parent
SCREAMING_SNAKE_CASE__ : int = batch_size
SCREAMING_SNAKE_CASE__ : List[str] = is_training
SCREAMING_SNAKE_CASE__ : int = use_auxiliary_loss
SCREAMING_SNAKE_CASE__ : Any = num_queries
SCREAMING_SNAKE_CASE__ : List[str] = num_channels
SCREAMING_SNAKE_CASE__ : int = min_size
SCREAMING_SNAKE_CASE__ : int = max_size
SCREAMING_SNAKE_CASE__ : Tuple = num_labels
SCREAMING_SNAKE_CASE__ : Tuple = hidden_dim
SCREAMING_SNAKE_CASE__ : int = hidden_dim
    def prepare_config_and_inputs( self ):
SCREAMING_SNAKE_CASE__ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
a )
SCREAMING_SNAKE_CASE__ : List[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=a )
SCREAMING_SNAKE_CASE__ : Optional[int] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=a ) > 0.5
).float()
SCREAMING_SNAKE_CASE__ : Any = (torch.rand((self.batch_size, self.num_labels) , device=a ) > 0.5).long()
SCREAMING_SNAKE_CASE__ : str = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config( self ):
SCREAMING_SNAKE_CASE__ : Optional[int] = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
SCREAMING_SNAKE_CASE__ : Any = self.num_queries
SCREAMING_SNAKE_CASE__ : str = self.num_labels
SCREAMING_SNAKE_CASE__ : Optional[int] = [1, 1, 1, 1]
SCREAMING_SNAKE_CASE__ : Optional[int] = self.num_channels
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 64
SCREAMING_SNAKE_CASE__ : int = 1_28
SCREAMING_SNAKE_CASE__ : Tuple = self.hidden_dim
SCREAMING_SNAKE_CASE__ : List[Any] = self.hidden_dim
SCREAMING_SNAKE_CASE__ : Any = self.hidden_dim
return config
    def prepare_config_and_inputs_for_common( self ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)
    def create_and_check_maskaformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskaFormerModel(config=config)
            model.to(torch_device)
            model.eval()
            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape, (self.batch_size, self.num_queries, self.hidden_dim)
        )
        # let's ensure the other two hidden states exist
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)
        if output_hidden_states:
            self.check_output_hidden_state(output, config)
    def create_and_check_maskaformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskaFormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
            comm_check_on_output(result)
            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )
            comm_check_on_output(result)
        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskaFormerModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
    def setUp( self ) ->None:
        self.model_tester = MaskaFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskaFormerConfig, has_text_modality=False)
def A_ ( self : int ) ->Optional[int]:
self.config_tester.run_common_tests()
def A_ ( self : int ) ->int:
SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(a , **a , output_hidden_states=a )
def A_ ( self : int ) ->List[Any]:
SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*a )
@unittest.skip(reason="Mask2Former does not use inputs_embeds" )
def A_ ( self : List[Any] ) ->List[str]:
pass
@unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" )
def A_ ( self : List[str] ) ->List[Any]:
pass
@unittest.skip(reason="Mask2Former is not a generative model" )
def A_ ( self : Optional[Any] ) ->Union[str, Any]:
pass
@unittest.skip(reason="Mask2Former does not use token embeddings" )
def A_ ( self : Optional[int] ) ->List[str]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def A_ ( self : int ) ->Optional[int]:
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def A_ ( self : Dict ) ->List[Any]:
pass
def A_ ( self : int ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model_class(a )
SCREAMING_SNAKE_CASE__ : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : List[Any] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : Optional[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a )
@slow
def A_ ( self : Any ) ->str:
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
SCREAMING_SNAKE_CASE__ : Dict = MaskaFormerModel.from_pretrained(a )
self.assertIsNotNone(a )
def A_ ( self : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = (self.model_tester.min_size,) * 2
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
"pixel_values": torch.randn((2, 3, *size) , device=a ),
"mask_labels": torch.randn((2, 10, *size) , device=a ),
"class_labels": torch.zeros(2 , 10 , device=a ).long(),
}
SCREAMING_SNAKE_CASE__ : Any = self.model_tester.get_config()
SCREAMING_SNAKE_CASE__ : Any = MaskaFormerForUniversalSegmentation(a ).to(a )
SCREAMING_SNAKE_CASE__ : Tuple = model(**a )
self.assertTrue(outputs.loss is not None )
def A_ ( self : int ) ->str:
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(a , **a , output_hidden_states=a )
def A_ ( self : Optional[Any] ) ->List[Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[Any] = model_class(a ).to(a )
SCREAMING_SNAKE_CASE__ : Any = model(**a , output_attentions=a )
self.assertTrue(outputs.attentions is not None )
def A_ ( self : Union[str, Any] ) ->List[str]:
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.all_model_classes[1]
SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ : Tuple = model_class(a )
model.to(a )
model.train()
SCREAMING_SNAKE_CASE__ : Optional[int] = model(a , mask_labels=a , class_labels=a ).loss
loss.backward()
def A_ ( self : Optional[Any] ) ->List[str]:
SCREAMING_SNAKE_CASE__ : Any = self.all_model_classes[1]
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ : int = True
SCREAMING_SNAKE_CASE__ : Tuple = True
SCREAMING_SNAKE_CASE__ : Optional[Any] = model_class(a ).to(a )
model.train()
SCREAMING_SNAKE_CASE__ : List[Any] = model(a , mask_labels=a , class_labels=a )
SCREAMING_SNAKE_CASE__ : Optional[int] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE__ : Dict = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE__ : Any = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE__ : List[Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=a )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__lowercase :Union[str, Any] = 1e-4
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@slow
class _a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def model_checkpoints( self ):
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
    def default_image_processor( self ):
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def A_ ( self : Optional[int] ) ->List[Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(a )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.default_image_processor
SCREAMING_SNAKE_CASE__ : Optional[Any] = prepare_img()
SCREAMING_SNAKE_CASE__ : List[Any] = image_processor(a , return_tensors="pt" ).to(a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(a , (1, 3, 3_84, 3_84) )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(**a )
SCREAMING_SNAKE_CASE__ : Dict = torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(a )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , a , atol=a ) )
SCREAMING_SNAKE_CASE__ : Tuple = torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(a )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , a , atol=a ) )
SCREAMING_SNAKE_CASE__ : List[Any] = torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(a )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , a , atol=a ) )
def A_ ( self : str ) ->Any:
SCREAMING_SNAKE_CASE__ : Any = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(a ).eval()
SCREAMING_SNAKE_CASE__ : Dict = self.default_image_processor
SCREAMING_SNAKE_CASE__ : List[Any] = prepare_img()
SCREAMING_SNAKE_CASE__ : Dict = image_processor(a , return_tensors="pt" ).to(a )
SCREAMING_SNAKE_CASE__ : List[str] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(a , (1, 3, 3_84, 3_84) )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : int = model(**a )
# masks_queries_logits
SCREAMING_SNAKE_CASE__ : List[str] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
SCREAMING_SNAKE_CASE__ : Optional[int] = [
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor(a ).to(a )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , a , atol=a ) )
# class_queries_logits
SCREAMING_SNAKE_CASE__ : str = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
SCREAMING_SNAKE_CASE__ : str = torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(a )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , a , atol=a ) )
def A_ ( self : Union[str, Any] ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : int = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(a ).eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.default_image_processor
SCREAMING_SNAKE_CASE__ : Tuple = image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors="pt" , )
SCREAMING_SNAKE_CASE__ : Optional[int] = inputs["pixel_values"].to(a )
SCREAMING_SNAKE_CASE__ : List[Any] = [el.to(a ) for el in inputs["mask_labels"]]
SCREAMING_SNAKE_CASE__ : Optional[Any] = [el.to(a ) for el in inputs["class_labels"]]
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : str = model(**a )
self.assertTrue(outputs.loss is not None )
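# Illustrative note (added): this suite follows the standard transformers test
# layout, so a targeted run would look like, e.g.
#     python -m pytest path/to/this_test_file.py -k "MaskaFormer"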
| 714 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : int , _lowerCamelCase : Optional[int] = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = {}
if train_file is not None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [train_file]
if eval_file is not None:
SCREAMING_SNAKE_CASE__ : int = [eval_file]
if test_file is not None:
SCREAMING_SNAKE_CASE__ : int = [test_file]
SCREAMING_SNAKE_CASE__ : Optional[int] = datasets.load_dataset("csv" , data_files=_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[str] = list(ds[list(files.keys() )[0]].features.keys() )
SCREAMING_SNAKE_CASE__ : int = features_name.pop(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = list(set(ds[list(files.keys() )[0]][label_name] ) )
SCREAMING_SNAKE_CASE__ : List[str] = {label: i for i, label in enumerate(_lowerCamelCase )}
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.model_input_names
SCREAMING_SNAKE_CASE__ : Any = {}
if len(_lowerCamelCase ) == 1:
for k in files.keys():
SCREAMING_SNAKE_CASE__ : List[Any] = ds[k].map(
lambda _lowerCamelCase : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=_lowerCamelCase , max_length=_lowerCamelCase , padding="max_length" ) , batched=_lowerCamelCase , )
elif len(_lowerCamelCase ) == 2:
for k in files.keys():
SCREAMING_SNAKE_CASE__ : Any = ds[k].map(
lambda _lowerCamelCase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=_lowerCamelCase , max_length=_lowerCamelCase , padding="max_length" , ) , batched=_lowerCamelCase , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
SCREAMING_SNAKE_CASE__ : Tuple = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE__ : List[Any] = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
SCREAMING_SNAKE_CASE__ : int = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE__ : Optional[int] = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
SCREAMING_SNAKE_CASE__ : int = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE__ : Optional[Any] = labelaid[ex[label_name]]
yield (d, label)
SCREAMING_SNAKE_CASE__ : Tuple = (
tf.data.Dataset.from_generator(
_lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
SCREAMING_SNAKE_CASE__ : Any = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
tf.data.Dataset.from_generator(
_lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
SCREAMING_SNAKE_CASE__ : Dict = (
tf.data.Dataset.from_generator(
_lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
SCREAMING_SNAKE_CASE__ : Dict = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
__lowercase :List[Any] = logging.getLogger(__name__)
@dataclass
class _a :
"""simple docstring"""
snake_case_ = field(metadata={"help": "Which column contains the label"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "The path of the training file"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "The path of the development file"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "The path of the test file"} )
snake_case_ = field(
default=1_28 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
snake_case_ = field(
default=lowercase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
@dataclass
class _a :
"""simple docstring"""
snake_case_ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
snake_case_ = field(
default=lowercase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
snake_case_ = field(
default=lowercase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "Set this flag to use fast tokenization."} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
snake_case_ = field(
default=lowercase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(
f"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
f"""16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE__ : Any = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=_lowerCamelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
SCREAMING_SNAKE_CASE__ : str = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(_lowerCamelCase ) , labelaid=_lowerCamelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
SCREAMING_SNAKE_CASE__ : Optional[Any] = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=_lowerCamelCase , cache_dir=model_args.cache_dir , )
def compute_metrics(_lowerCamelCase : EvalPrediction ) -> Dict:
SCREAMING_SNAKE_CASE__ : Dict = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
SCREAMING_SNAKE_CASE__ : str = TFTrainer(
model=_lowerCamelCase , args=_lowerCamelCase , train_dataset=_lowerCamelCase , eval_dataset=_lowerCamelCase , compute_metrics=_lowerCamelCase , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
SCREAMING_SNAKE_CASE__ : Dict = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
SCREAMING_SNAKE_CASE__ : str = trainer.evaluate()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = os.path.join(training_args.output_dir , "eval_results.txt" )
with open(_lowerCamelCase , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(f""" {key} = {value}""" )
writer.write(f"""{key} = {value}\n""" )
results.update(_lowerCamelCase )
return results
if __name__ == "__main__":
main()
| 26 | 0 |
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
__lowercase :Optional[Any] = get_logger(__name__)
__lowercase :str = r"\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n"
class FlaxLogitsProcessor:
"""simple docstring"""
@add_start_docstrings(lowercase_ )
def __call__( self : List[Any] , a : jnp.ndarray , a : jnp.ndarray ) ->jnp.ndarray:
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class FlaxLogitsWarper:
"""simple docstring"""
@add_start_docstrings(lowercase_ )
def __call__( self : Tuple , a : jnp.ndarray , a : jnp.ndarray ) ->jnp.ndarray:
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class FlaxLogitsProcessorList( list ):
"""simple docstring"""
@add_start_docstrings(lowercase_ )
def __call__( self : Optional[int] , a : jnp.ndarray , a : jnp.ndarray , a : int , **a : Dict ) ->jnp.ndarray:
for processor in self:
SCREAMING_SNAKE_CASE__ : int = inspect.signature(processor.__call__ ).parameters
if len(lowercase_ ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
f"""Make sure that all the required parameters: {list(function_args.keys() )} for """
f"""{processor.__class__} are passed to the logits processor.""" )
SCREAMING_SNAKE_CASE__ : List[Any] = processor(lowercase_ , lowercase_ , lowercase_ , **lowercase_ )
else:
SCREAMING_SNAKE_CASE__ : str = processor(lowercase_ , lowercase_ , lowercase_ )
return scores
class FlaxTemperatureLogitsWarper( FlaxLogitsWarper ):
"""simple docstring"""
def __init__( self : Union[str, Any] , a : float ) ->Tuple:
if not isinstance(lowercase_ , lowercase_ ) or not (temperature > 0):
raise ValueError(f"""`temperature` has to be a strictly positive float, but is {temperature}""" )
SCREAMING_SNAKE_CASE__ : Any = temperature
def __call__( self : Any , a : jnp.ndarray , a : jnp.ndarray , a : int ) ->jnp.ndarray:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = scores / self.temperature
return scores
class FlaxTopPLogitsWarper( FlaxLogitsWarper ):
"""simple docstring"""
def __init__( self : List[str] , a : float , a : float = -float("Inf" ) , a : int = 1 ) ->Optional[Any]:
if not isinstance(lowercase_ , lowercase_ ) or (top_p < 0 or top_p > 1.0):
raise ValueError(f"""`top_p` has to be a float > 0 and < 1, but is {top_p}""" )
if not isinstance(lowercase_ , lowercase_ ) or (min_tokens_to_keep < 1):
raise ValueError(f"""`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}""" )
SCREAMING_SNAKE_CASE__ : Tuple = top_p
SCREAMING_SNAKE_CASE__ : Optional[Any] = filter_value
SCREAMING_SNAKE_CASE__ : Tuple = min_tokens_to_keep
def __call__( self : Any , a : jnp.ndarray , a : jnp.ndarray , a : int ) ->jnp.ndarray:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = lax.top_k(lowercase_ , scores.shape[-1] )
SCREAMING_SNAKE_CASE__ : Any = jnp.full_like(lowercase_ , self.filter_value )
SCREAMING_SNAKE_CASE__ : Tuple = jax.nn.softmax(lowercase_ , axis=-1 ).cumsum(axis=-1 )
SCREAMING_SNAKE_CASE__ : Dict = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
SCREAMING_SNAKE_CASE__ : List[str] = jnp.roll(lowercase_ , 1 )
score_mask |= score_mask.at[:, 0].set(lowercase_ )
# min tokens to keep
SCREAMING_SNAKE_CASE__ : Union[str, Any] = score_mask.at[:, : self.min_tokens_to_keep].set(lowercase_ )
SCREAMING_SNAKE_CASE__ : str = jnp.where(lowercase_ , lowercase_ , lowercase_ )
SCREAMING_SNAKE_CASE__ : Any = jax.lax.sort_key_val(lowercase_ , lowercase_ )[-1]
return next_scores
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : List[str] , a : int , a : float = -float("Inf" ) , a : int = 1 ) ->Any:
if not isinstance(lowercase_ , lowercase_ ) or top_k <= 0:
raise ValueError(f"""`top_k` has to be a strictly positive integer, but is {top_k}""" )
SCREAMING_SNAKE_CASE__ : List[Any] = max(lowercase_ , lowercase_ )
SCREAMING_SNAKE_CASE__ : Any = filter_value
def __call__( self : List[Any] , a : jnp.ndarray , a : jnp.ndarray , a : int ) ->jnp.ndarray:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[str] = scores.shape
SCREAMING_SNAKE_CASE__ : Tuple = jnp.full(batch_size * vocab_size , self.filter_value )
SCREAMING_SNAKE_CASE__ : str = min(self.top_k , scores.shape[-1] ) # Safety check
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : str = lax.top_k(lowercase_ , lowercase_ )
SCREAMING_SNAKE_CASE__ : Tuple = jnp.broadcast_to((jnp.arange(lowercase_ ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
SCREAMING_SNAKE_CASE__ : List[str] = topk_scores.flatten()
SCREAMING_SNAKE_CASE__ : Optional[Any] = topk_indices.flatten() + shift
SCREAMING_SNAKE_CASE__ : List[Any] = next_scores_flat.at[topk_indices_flat].set(lowercase_ )
SCREAMING_SNAKE_CASE__ : str = next_scores_flat.reshape(lowercase_ , lowercase_ )
return next_scores
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : int , a : int ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : Dict = bos_token_id
def __call__( self : Union[str, Any] , a : jnp.ndarray , a : jnp.ndarray , a : int ) ->jnp.ndarray:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jnp.full(scores.shape , -float("inf" ) )
SCREAMING_SNAKE_CASE__ : Tuple = 1 - jnp.bool_(cur_len - 1 )
SCREAMING_SNAKE_CASE__ : Dict = jnp.where(lowercase_ , new_scores.at[:, self.bos_token_id].set(0 ) , lowercase_ )
return scores
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : Optional[Any] , a : int , a : int ) ->Tuple:
SCREAMING_SNAKE_CASE__ : Optional[Any] = max_length
SCREAMING_SNAKE_CASE__ : Any = eos_token_id
def __call__( self : str , a : jnp.ndarray , a : jnp.ndarray , a : int ) ->jnp.ndarray:
SCREAMING_SNAKE_CASE__ : Optional[int] = jnp.full(scores.shape , -float("inf" ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = 1 - jnp.bool_(cur_len - self.max_length + 1 )
SCREAMING_SNAKE_CASE__ : Optional[int] = jnp.where(lowercase_ , new_scores.at[:, self.eos_token_id].set(0 ) , lowercase_ )
return scores
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : int , a : int , a : int ) ->Dict:
if not isinstance(lowercase_ , lowercase_ ) or min_length < 0:
raise ValueError(f"""`min_length` has to be a positive integer, but is {min_length}""" )
if not isinstance(lowercase_ , lowercase_ ) or eos_token_id < 0:
raise ValueError(f"""`eos_token_id` has to be a positive integer, but is {eos_token_id}""" )
SCREAMING_SNAKE_CASE__ : Any = min_length
SCREAMING_SNAKE_CASE__ : List[str] = eos_token_id
def __call__( self : Tuple , a : jnp.ndarray , a : jnp.ndarray , a : int ) ->jnp.ndarray:
SCREAMING_SNAKE_CASE__ : int = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
SCREAMING_SNAKE_CASE__ : Dict = jnp.where(lowercase_ , scores.at[:, self.eos_token_id].set(-float("inf" ) ) , lowercase_ )
return scores
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : Tuple , a : List[Any] , a : int ) ->Tuple:
SCREAMING_SNAKE_CASE__ : str = list(lowercase_ )
SCREAMING_SNAKE_CASE__ : Dict = begin_index
def __call__( self : Tuple , a : int , a : Dict , a : int ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : Dict = 1 - jnp.bool_(cur_len - self.begin_index )
SCREAMING_SNAKE_CASE__ : Any = jnp.where(lowercase_ , scores.at[:, self.begin_suppress_tokens].set(-float("inf" ) ) , lowercase_ )
return scores
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , a : list ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : int = list(lowercase_ )
def __call__( self : Union[str, Any] , a : jnp.ndarray , a : jnp.ndarray , a : int ) ->jnp.ndarray:
SCREAMING_SNAKE_CASE__ : Any = scores.at[..., self.suppress_tokens].set(-float("inf" ) )
return scores
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : str , a : Union[str, Any] ) ->Any:
SCREAMING_SNAKE_CASE__ : List[Any] = dict(lowercase_ )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
SCREAMING_SNAKE_CASE__ : List[str] = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = force_token_array.at[index].set(lowercase_ )
SCREAMING_SNAKE_CASE__ : List[str] = jnp.intaa(lowercase_ )
def __call__( self : Tuple , a : jnp.ndarray , a : jnp.ndarray , a : int ) ->jnp.ndarray:
def _force_token(a : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : List[Any] = scores.shape[0]
SCREAMING_SNAKE_CASE__ : List[str] = self.force_token_array[generation_idx]
SCREAMING_SNAKE_CASE__ : List[Any] = jnp.ones_like(lowercase_ , dtype=scores.dtype ) * -float("inf" )
SCREAMING_SNAKE_CASE__ : List[Any] = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
SCREAMING_SNAKE_CASE__ : List[str] = lax.dynamic_update_slice(lowercase_ , lowercase_ , (0, current_token) )
return new_scores
SCREAMING_SNAKE_CASE__ : List[Any] = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(lowercase_ ) , lambda: scores , ) , )
return scores
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : List[str] , a : Optional[Any] , a : Optional[int] , a : List[Any] ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : Tuple = generate_config.eos_token_id
SCREAMING_SNAKE_CASE__ : Optional[Any] = generate_config.no_timestamps_token_id
SCREAMING_SNAKE_CASE__ : int = generate_config.no_timestamps_token_id + 1
SCREAMING_SNAKE_CASE__ : Tuple = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(lowercase_ , "max_initial_timestamp_index" ):
SCREAMING_SNAKE_CASE__ : Any = generate_config.max_initial_timestamp_index
else:
SCREAMING_SNAKE_CASE__ : Tuple = model_config.vocab_size
if self.max_initial_timestamp_index is None:
SCREAMING_SNAKE_CASE__ : str = model_config.vocab_size
def __call__( self : int , a : List[str] , a : List[Any] , a : Optional[Any] ) ->List[str]:
SCREAMING_SNAKE_CASE__ : int = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) )
def handle_pairs(a : Optional[Any] , a : int ):
SCREAMING_SNAKE_CASE__ : Any = jnp.where((cur_len - self.begin_index) >= 1 , lowercase_ , lowercase_ )
SCREAMING_SNAKE_CASE__ : int = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , lowercase_ , )
SCREAMING_SNAKE_CASE__ : List[Any] = jnp.where((cur_len - self.begin_index) < 2 , lowercase_ , lowercase_ )
SCREAMING_SNAKE_CASE__ : str = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , lowercase_ , lowercase_ , )
return jnp.where(
lowercase_ , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf" ) ) , scores_k.at[: self.eos_token_id].set(-float("inf" ) ) , ) , lowercase_ , )
SCREAMING_SNAKE_CASE__ : Any = jax.vmap(lowercase_ )(lowercase_ , lowercase_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = jnp.where(cur_len == self.begin_index , lowercase_ , lowercase_ )
SCREAMING_SNAKE_CASE__ : int = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , lowercase_ , )
SCREAMING_SNAKE_CASE__ : str = self.timestamp_begin + self.max_initial_timestamp_index
SCREAMING_SNAKE_CASE__ : Dict = jnp.where(
lowercase_ , scores.at[:, last_allowed + 1 :].set(-float("inf" ) ) , lowercase_ , )
# if sum of probability over timestamps is above any other token, sample timestamp
SCREAMING_SNAKE_CASE__ : List[str] = jax.nn.log_softmax(lowercase_ , axis=-1 )
def handle_cumulative_probs(a : int , a : List[str] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
SCREAMING_SNAKE_CASE__ : Dict = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf" ) ) , lowercase_ , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jax.vmap(lowercase_ )(lowercase_ , lowercase_ )
return scores
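# A minimal, self-contained sketch of the two simplest processors above
# (temperature scaling and top-k filtering), written with descriptive names.
# The identifiers in the snippet above are dataset placeholders, so this is a
# hedged restatement of the pattern, not the library's exact implementation.
import jax.numpy as jnp
from jax import lax

def apply_temperature(scores: jnp.ndarray, temperature: float) -> jnp.ndarray:
    # Temperature > 1 flattens the distribution, < 1 sharpens it.
    return scores / temperature

def apply_top_k(scores: jnp.ndarray, top_k: int, filter_value: float = -float("inf")) -> jnp.ndarray:
    # Keep the top_k highest logits per row; push everything else to filter_value.
    topk_scores, topk_indices = lax.top_k(scores, top_k)
    rows = jnp.arange(scores.shape[0])[:, None]
    # Scatter the kept logits back into their original vocabulary positions.
    return jnp.full_like(scores, filter_value).at[rows, topk_indices].set(topk_scores)

example = jnp.array([[0.1, 2.0, -1.0, 0.5]])
print(apply_top_k(apply_temperature(example, 0.7), top_k=2))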
| 715 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowercase :int = logging.get_logger(__name__)
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = ["pixel_values"]
def __init__( self : int , a : bool = True , a : Optional[Dict[str, int]] = None , a : PILImageResampling = PILImageResampling.BILINEAR , a : bool = True , a : Dict[str, int] = None , a : bool = True , a : Union[int, float] = 1 / 2_55 , a : bool = True , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , **a : List[str] , ) ->None:
super().__init__(**a )
SCREAMING_SNAKE_CASE__ : List[str] = size if size is not None else {"shortest_edge": 2_56}
SCREAMING_SNAKE_CASE__ : Any = get_size_dict(a , default_to_square=a )
SCREAMING_SNAKE_CASE__ : List[Any] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(a )
SCREAMING_SNAKE_CASE__ : List[str] = do_resize
SCREAMING_SNAKE_CASE__ : List[str] = size
SCREAMING_SNAKE_CASE__ : List[Any] = resample
SCREAMING_SNAKE_CASE__ : int = do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[Any] = crop_size
SCREAMING_SNAKE_CASE__ : Any = do_rescale
SCREAMING_SNAKE_CASE__ : Any = rescale_factor
SCREAMING_SNAKE_CASE__ : int = do_normalize
SCREAMING_SNAKE_CASE__ : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE__ : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A_ ( self : Tuple , a : np.ndarray , a : Dict[str, int] , a : PILImageResampling = PILImageResampling.BICUBIC , a : Optional[Union[str, ChannelDimension]] = None , **a : Optional[int] , ) ->np.ndarray:
SCREAMING_SNAKE_CASE__ : List[Any] = get_size_dict(a , default_to_square=a )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = get_resize_output_image_size(a , size=size["shortest_edge"] , default_to_square=a )
return resize(a , size=a , resample=a , data_format=a , **a )
def A_ ( self : List[Any] , a : np.ndarray , a : Dict[str, int] , a : Optional[Union[str, ChannelDimension]] = None , **a : List[Any] , ) ->np.ndarray:
SCREAMING_SNAKE_CASE__ : Tuple = get_size_dict(a )
return center_crop(a , size=(size["height"], size["width"]) , data_format=a , **a )
def A_ ( self : Optional[int] , a : np.ndarray , a : float , a : Optional[Union[str, ChannelDimension]] = None , **a : Dict ) ->np.ndarray:
return rescale(a , scale=a , data_format=a , **a )
def A_ ( self : Union[str, Any] , a : np.ndarray , a : Union[float, List[float]] , a : Union[float, List[float]] , a : Optional[Union[str, ChannelDimension]] = None , **a : Union[str, Any] , ) ->np.ndarray:
return normalize(a , mean=a , std=a , data_format=a , **a )
def A_ ( self : Tuple , a : ImageInput , a : Optional[bool] = None , a : Dict[str, int] = None , a : PILImageResampling = None , a : bool = None , a : Dict[str, int] = None , a : Optional[bool] = None , a : Optional[float] = None , a : Optional[bool] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[str, TensorType]] = None , a : Union[str, ChannelDimension] = ChannelDimension.FIRST , **a : Any , ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : Union[str, Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(a , default_to_square=a )
SCREAMING_SNAKE_CASE__ : str = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[int] = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(a )
SCREAMING_SNAKE_CASE__ : List[str] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ : Dict = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ : Optional[int] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE__ : Tuple = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE__ : Union[str, Any] = make_list_of_images(a )
if not valid_images(a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : List[str] = [to_numpy_array(a ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE__ : Tuple = [self.resize(image=a , size=a , resample=a ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE__ : List[Any] = [self.center_crop(image=a , size=a ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ : List[str] = [self.rescale(image=a , scale=a ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ : Dict = [self.normalize(image=a , mean=a , std=a ) for image in images]
SCREAMING_SNAKE_CASE__ : Dict = [to_channel_dimension_format(a , a ) for image in images]
SCREAMING_SNAKE_CASE__ : Optional[int] = {"pixel_values": images}
return BatchFeature(data=a , tensor_type=a )
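# A numpy-only illustration of the rescale -> normalize tail of the pipeline
# above (resize and center-crop are omitted since they need an image backend).
# The mean/std values mirror the IMAGENET_STANDARD_* constants imported above.
import numpy as np

def rescale(image: np.ndarray, scale: float = 1 / 255) -> np.ndarray:
    # Map uint8 pixels [0, 255] into floats [0, 1].
    return image.astype(np.float32) * scale

def normalize(image: np.ndarray, mean, std) -> np.ndarray:
    # Standardize each channel of an HWC image.
    return (image - np.array(mean)) / np.array(std)

image = np.random.randint(0, 256, size=(224, 224, 3), dtype=np.uint8)
pixel_values = normalize(rescale(image), mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
print(pixel_values.shape, float(pixel_values.min()), float(pixel_values.max()))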
| 26 | 0 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase :Union[str, Any] = logging.get_logger(__name__)
__lowercase :str = {
"Salesforce/blip-vqa-base": "https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json",
"Salesforce/blip-vqa-capfit-large": (
"https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json"
),
"Salesforce/blip-image-captioning-base": (
"https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"
),
"Salesforce/blip-image-captioning-large": (
"https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"
),
"Salesforce/blip-itm-base-coco": "https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json",
"Salesforce/blip-itm-large-coco": "https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json",
"Salesforce/blip-itm-base-flikr": "https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json",
"Salesforce/blip-itm-large-flikr": (
"https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json"
),
}
class _a ( __lowerCAmelCase ):
"""simple docstring"""
snake_case_ = '''blip_text_model'''
def __init__( self : List[str] , a : Any=3_05_24 , a : Optional[Any]=7_68 , a : Tuple=7_68 , a : Optional[Any]=30_72 , a : Optional[Any]=7_68 , a : Any=12 , a : Optional[int]=8 , a : List[str]=5_12 , a : Any="gelu" , a : Union[str, Any]=1E-12 , a : List[str]=0.0 , a : Union[str, Any]=0.0 , a : Union[str, Any]=0.02 , a : Any=3_05_22 , a : Optional[Any]=2 , a : Tuple=0 , a : int=1_02 , a : Any=True , a : Any=True , **a : int , ) ->Union[str, Any]:
super().__init__(
pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , sep_token_id=lowerCAmelCase_ , **lowerCAmelCase_ , )
SCREAMING_SNAKE_CASE__ : Any = vocab_size
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = encoder_hidden_size
SCREAMING_SNAKE_CASE__ : str = intermediate_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = projection_dim
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE__ : str = max_position_embeddings
SCREAMING_SNAKE_CASE__ : str = layer_norm_eps
SCREAMING_SNAKE_CASE__ : int = hidden_act
SCREAMING_SNAKE_CASE__ : List[str] = initializer_range
SCREAMING_SNAKE_CASE__ : Any = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Any = is_decoder
SCREAMING_SNAKE_CASE__ : Dict = use_cache
@classmethod
def A_ ( cls : Union[str, Any] , a : Optional[Any] , **a : Tuple ) ->"PretrainedConfig":
cls._set_token_in_kwargs(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[Any] = cls.get_config_dict(lowerCAmelCase_ , **lowerCAmelCase_ )
# get the text config dict if we are loading from BlipConfig
if config_dict.get("model_type" ) == "blip":
SCREAMING_SNAKE_CASE__ : List[str] = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowerCAmelCase_ , **lowerCAmelCase_ )
class _a ( __lowerCAmelCase ):
"""simple docstring"""
snake_case_ = '''blip_vision_model'''
def __init__( self : Any , a : Tuple=7_68 , a : Union[str, Any]=30_72 , a : List[str]=5_12 , a : Optional[int]=12 , a : Dict=12 , a : Union[str, Any]=3_84 , a : Union[str, Any]=16 , a : Optional[Any]="gelu" , a : Union[str, Any]=1E-5 , a : Tuple=0.0 , a : int=1E-10 , **a : int , ) ->Optional[Any]:
super().__init__(**lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : int = hidden_size
SCREAMING_SNAKE_CASE__ : Dict = intermediate_size
SCREAMING_SNAKE_CASE__ : Tuple = projection_dim
SCREAMING_SNAKE_CASE__ : str = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE__ : Tuple = patch_size
SCREAMING_SNAKE_CASE__ : List[str] = image_size
SCREAMING_SNAKE_CASE__ : int = initializer_range
SCREAMING_SNAKE_CASE__ : Union[str, Any] = attention_dropout
SCREAMING_SNAKE_CASE__ : List[str] = layer_norm_eps
SCREAMING_SNAKE_CASE__ : int = hidden_act
@classmethod
def A_ ( cls : Union[str, Any] , a : int , **a : Optional[int] ) ->"PretrainedConfig":
cls._set_token_in_kwargs(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = cls.get_config_dict(lowerCAmelCase_ , **lowerCAmelCase_ )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get("model_type" ) == "blip":
SCREAMING_SNAKE_CASE__ : Any = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowerCAmelCase_ , **lowerCAmelCase_ )
class _a ( __lowerCAmelCase ):
"""simple docstring"""
snake_case_ = '''blip'''
snake_case_ = True
def __init__( self : List[Any] , a : Dict=None , a : Tuple=None , a : Any=5_12 , a : List[str]=2.6592 , a : Optional[int]=2_56 , **a : List[str] , ) ->int:
super().__init__(**lowerCAmelCase_ )
if text_config is None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = {}
logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values." )
if vision_config is None:
SCREAMING_SNAKE_CASE__ : Dict = {}
logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values." )
SCREAMING_SNAKE_CASE__ : List[Any] = BlipTextConfig(**lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = BlipVisionConfig(**lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : List[str] = self.vision_config.hidden_size
SCREAMING_SNAKE_CASE__ : List[str] = projection_dim
SCREAMING_SNAKE_CASE__ : Any = logit_scale_init_value
SCREAMING_SNAKE_CASE__ : List[Any] = 1.0
SCREAMING_SNAKE_CASE__ : int = 0.02
SCREAMING_SNAKE_CASE__ : int = image_text_hidden_size
@classmethod
def A_ ( cls : List[str] , a : Optional[Any] , a : Optional[int] , **a : List[str] ) ->Dict:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowerCAmelCase_ )
def A_ ( self : Optional[int] ) ->Dict:
SCREAMING_SNAKE_CASE__ : str = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE__ : str = self.text_config.to_dict()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.vision_config.to_dict()
SCREAMING_SNAKE_CASE__ : int = self.__class__.model_type
return output
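# The composite-config pattern above in miniature: sub-configs fall back to
# defaults when omitted, and to_dict() re-nests them. Class and field names
# here are illustrative stand-ins, not the library's API.
class TextConfig:
    def __init__(self, vocab_size=30524, hidden_size=768):
        self.vocab_size, self.hidden_size = vocab_size, hidden_size

    def to_dict(self):
        return dict(self.__dict__)

class VisionConfig:
    def __init__(self, image_size=384, patch_size=16):
        self.image_size, self.patch_size = image_size, patch_size

    def to_dict(self):
        return dict(self.__dict__)

class CompositeConfig:
    def __init__(self, text_config=None, vision_config=None, projection_dim=512):
        # Missing sub-configs are replaced with defaults, as in the snippet above.
        self.text_config = TextConfig(**(text_config or {}))
        self.vision_config = VisionConfig(**(vision_config or {}))
        self.projection_dim = projection_dim

    def to_dict(self):
        output = dict(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        return output

print(CompositeConfig(text_config={"vocab_size": 1000}).to_dict())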
| 716 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _a ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self : Dict ) ->List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def A_ ( self : Dict ) ->Tuple:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-canny" , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__ : List[Any] = controlnet_params
SCREAMING_SNAKE_CASE__ : Dict = "bird"
SCREAMING_SNAKE_CASE__ : List[Any] = jax.device_count()
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe.prepare_text_inputs([prompts] * num_samples )
SCREAMING_SNAKE_CASE__ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe.prepare_image_inputs([canny_image] * num_samples )
SCREAMING_SNAKE_CASE__ : List[Any] = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE__ : int = jax.random.split(a , jax.device_count() )
SCREAMING_SNAKE_CASE__ : List[Any] = replicate(a )
SCREAMING_SNAKE_CASE__ : List[str] = shard(a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = shard(a )
SCREAMING_SNAKE_CASE__ : Dict = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE__ : List[Any] = images[0, 2_53:2_56, 2_53:2_56, -1]
SCREAMING_SNAKE_CASE__ : Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = jnp.array(
[0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def A_ ( self : List[Any] ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : int = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-openpose" , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__ : Optional[int] = controlnet_params
SCREAMING_SNAKE_CASE__ : Any = "Chef in the kitchen"
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jax.device_count()
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe.prepare_text_inputs([prompts] * num_samples )
SCREAMING_SNAKE_CASE__ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" )
SCREAMING_SNAKE_CASE__ : str = pipe.prepare_image_inputs([pose_image] * num_samples )
SCREAMING_SNAKE_CASE__ : Any = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE__ : List[str] = jax.random.split(a , jax.device_count() )
SCREAMING_SNAKE_CASE__ : Optional[Any] = replicate(a )
SCREAMING_SNAKE_CASE__ : Tuple = shard(a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = shard(a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE__ : str = images[0, 2_53:2_56, 2_53:2_56, -1]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jnp.array(
[[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
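# The replicate/shard idiom these tests depend on, in isolation: parameters
# are copied to every device, inputs gain a leading device axis. A hedged
# sketch assuming jax and flax are installed; the toy params dict is an
# assumption standing in for real model parameters.
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

num_devices = jax.device_count()
params = {"w": jnp.ones((4,))}            # toy stand-in for model params
inputs = jnp.arange(num_devices * 2.0)    # batch divisible by device count

replicated = replicate(params)            # leading axis: num_devices copies
sharded = shard(inputs)                   # leading axis: num_devices splits
print(jax.tree_util.tree_map(lambda x: x.shape, replicated), sharded.shape)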
| 26 | 0 |
def UpperCAmelCase ( _lowerCamelCase : List[Any] ):
'''simple docstring'''
if p < 2:
raise ValueError("p should not be less than 2!" )
elif p == 2:
return True
SCREAMING_SNAKE_CASE__ : int = 4
SCREAMING_SNAKE_CASE__ : Optional[Any] = (1 << p) - 1
for _ in range(p - 2 ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = ((s * s) - 2) % m
return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
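# The same Lucas-Lehmer test with descriptive names: 2**p - 1 is prime iff
# s_(p-2) == 0 in the sequence s_0 = 4, s_k = (s_(k-1)**2 - 2) mod (2**p - 1).
def lucas_lehmer(p: int) -> bool:
    if p < 2:
        raise ValueError("p should not be less than 2!")
    if p == 2:
        return True  # 2**2 - 1 = 3 is prime
    s = 4
    mersenne = (1 << p) - 1  # the Mersenne number 2**p - 1
    for _ in range(p - 2):
        s = (s * s - 2) % mersenne
    return s == 0

assert lucas_lehmer(7)       # 127 is prime
assert not lucas_lehmer(11)  # 2047 = 23 * 89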
| 717 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__lowercase :List[Any] = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def UpperCAmelCase ( _lowerCamelCase : int ):
'''simple docstring'''
config.addinivalue_line(
"markers" , "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested" )
config.addinivalue_line(
"markers" , "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested" )
config.addinivalue_line("markers" , "is_pipeline_test: mark test to run only when pipelines are tested" )
config.addinivalue_line("markers" , "is_staging_test: mark test to run only in the staging environment" )
config.addinivalue_line("markers" , "accelerate_tests: mark test that require accelerate" )
config.addinivalue_line("markers" , "tool_tests: mark the tool tests that are run on their specific schedule" )
def UpperCAmelCase ( _lowerCamelCase : str ):
'''simple docstring'''
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase : Tuple ):
'''simple docstring'''
from transformers.testing_utils import pytest_terminal_summary_main
SCREAMING_SNAKE_CASE__ : List[str] = terminalreporter.config.getoption("--make-reports" )
if make_reports:
pytest_terminal_summary_main(_lowerCamelCase , id=_lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase : List[Any] , _lowerCamelCase : Dict ):
'''simple docstring'''
if exitstatus == 5:
SCREAMING_SNAKE_CASE__ : List[str] = 0
# Doctest custom flag to ignore output.
__lowercase :Optional[Any] = doctest.register_optionflag("IGNORE_RESULT")
__lowercase :Dict = doctest.OutputChecker
class _a ( lowercase__ ):
"""simple docstring"""
def A_ ( self : Dict , a : List[str] , a : Dict , a : int ) ->Optional[Any]:
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , a , a , a )
__lowercase :Any = CustomOutputChecker
__lowercase :Any = HfDoctestModule
__lowercase :int = HfDocTestParser
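# How the markers registered above are consumed in a test module; a hedged
# usage sketch (the marker name comes from the pytest_configure hook above):
import pytest

@pytest.mark.accelerate_tests
def test_requires_accelerate():
    # Select with: pytest -m accelerate_tests
    assert True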
| 26 | 0 |
import random
class _a :
"""simple docstring"""
@staticmethod
def A_ ( a : int ) ->str:
SCREAMING_SNAKE_CASE__ : int = [ord(snake_case__ ) for i in text]
SCREAMING_SNAKE_CASE__ : Optional[int] = []
SCREAMING_SNAKE_CASE__ : Tuple = []
for i in plain:
SCREAMING_SNAKE_CASE__ : int = random.randint(1 , 3_00 )
SCREAMING_SNAKE_CASE__ : int = (i + k) * k
cipher.append(snake_case__ )
key.append(snake_case__ )
return cipher, key
@staticmethod
def A_ ( a : List[str] , a : int ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[str] = []
for i in range(len(snake_case__ ) ):
SCREAMING_SNAKE_CASE__ : Tuple = int((cipher[i] - (key[i]) ** 2) / key[i] )
plain.append(chr(snake_case__ ) )
return "".join(snake_case__ )
if __name__ == "__main__":
__lowercase :Optional[Any] = Onepad().encrypt("Hello")
print(c, k)
print(Onepad().decrypt(c, k))
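# The cipher above with descriptive names: each code point p gets a random
# key k and is stored as (p + k) * k, recovered as (c - k*k) // k.
import random

def onepad_encrypt(text: str):
    cipher, key = [], []
    for code_point in (ord(ch) for ch in text):
        k = random.randint(1, 300)
        cipher.append((code_point + k) * k)
        key.append(k)
    return cipher, key

def onepad_decrypt(cipher, key) -> str:
    # (p + k) * k - k*k == p * k, which divides evenly by k.
    return "".join(chr((c - k * k) // k) for c, k in zip(cipher, key))

c, k = onepad_encrypt("Hello")
assert onepad_decrypt(c, k) == "Hello"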
| 718 |
def UpperCAmelCase ( _lowerCamelCase : int = 1_000 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = -1
SCREAMING_SNAKE_CASE__ : str = 0
for a in range(1 , n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
SCREAMING_SNAKE_CASE__ : Tuple = (n * n - 2 * a * n) // (2 * n - 2 * a)
SCREAMING_SNAKE_CASE__ : Dict = n - a - b
if c * c == (a * a + b * b):
SCREAMING_SNAKE_CASE__ : str = a * b * c
if candidate >= product:
SCREAMING_SNAKE_CASE__ : List[str] = candidate
return product
if __name__ == "__main__":
print(f"{solution() = }")
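# Why a single loop over `a` suffices above: from a + b + c = n and
# a**2 + b**2 = c**2, substituting c = n - a - b gives
# b = (n**2 - 2*a*n) / (2*n - 2*a). Restated with descriptive names:
def special_pythagorean_product(n: int = 1000) -> int:
    best = -1
    for a in range(1, n // 3):
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if a * a + b * b == c * c:
            best = max(best, a * b * c)
    return best

assert special_pythagorean_product(12) == 60  # the 3-4-5 triangle
print(special_pythagorean_product(1000))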
| 26 | 0 |
from math import isqrt
def UpperCAmelCase ( _lowerCamelCase : int ):
'''simple docstring'''
return all(number % divisor != 0 for divisor in range(2 , isqrt(__a ) + 1 ) )
def UpperCAmelCase ( _lowerCamelCase : int = 10**6 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = 0
SCREAMING_SNAKE_CASE__ : Tuple = 1
SCREAMING_SNAKE_CASE__ : List[Any] = 7
while prime_candidate < max_prime:
primes_count += is_prime(__a )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
print(f"{solution() = }")
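# Cleaner restatement of the count above: the candidates 7, 19, 37, ... are
# differences of consecutive cubes, (k+1)**3 - k**3 = 3k**2 + 3k + 1, so each
# step adds 6*(k+1); primality is checked by trial division up to isqrt(n).
from math import isqrt

def is_prime(number: int) -> bool:
    return number > 1 and all(number % d != 0 for d in range(2, isqrt(number) + 1))

def count_cube_difference_primes(max_prime: int = 10**6) -> int:
    primes_count, cube_index, candidate = 0, 1, 7
    while candidate < max_prime:
        primes_count += is_prime(candidate)
        cube_index += 1
        candidate += 6 * cube_index
    return primes_count

print(count_cube_difference_primes())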
| 719 |
from __future__ import annotations
def UpperCAmelCase ( _lowerCamelCase : list , _lowerCamelCase : int | None = None , _lowerCamelCase : int | None = None ):
'''simple docstring'''
if start is None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0
if end is None:
SCREAMING_SNAKE_CASE__ : Any = len(_lowerCamelCase ) - 1
if start >= end:
return
SCREAMING_SNAKE_CASE__ : List[str] = (start + end) // 2
    slowsort(sequence , start , mid )
    slowsort(sequence , mid + 1 , end )
if sequence[end] < sequence[mid]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = sequence[mid], sequence[end]
        slowsort(sequence , start , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
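# Slowsort with descriptive names plus a quick demo: recursively sort both
# halves, move the larger of the two middle/end elements to the end, then
# sort everything but the last element -- deliberately pessimal but correct.
def slowsort_demo(seq, start=0, end=None):
    if end is None:
        end = len(seq) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort_demo(seq, start, mid)
    slowsort_demo(seq, mid + 1, end)
    if seq[end] < seq[mid]:
        seq[end], seq[mid] = seq[mid], seq[end]
    slowsort_demo(seq, start, end - 1)

data = [5, 2, 9, 1, 5, 6]
slowsort_demo(data)
assert data == [1, 2, 5, 5, 6, 9]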
| 26 | 0 |
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase :Union[str, Any] = [
"""word_embeddings_layernorm.weight""",
"""word_embeddings_layernorm.bias""",
"""input_layernorm.weight""",
"""input_layernorm.bias""",
"""post_attention_layernorm.weight""",
"""post_attention_layernorm.bias""",
"""self_attention.dense.bias""",
"""mlp.dense_4h_to_h.bias""",
"""ln_f.weight""",
"""ln_f.bias""",
]
__lowercase :str = [
"""mlp.dense_4h_to_h.weight""",
"""self_attention.dense.weight""",
]
def UpperCAmelCase ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = {
"word_embeddings.weight": "word_embeddings.weight",
"word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
"word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
"weight": "ln_f.weight",
"bias": "ln_f.bias",
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
SCREAMING_SNAKE_CASE__ : Optional[Any] = int(re.match(r".*layer_(\d*).*" , _lowerCamelCase )[1] )
layer_number -= 3
return f"""h.{layer_number}.""" + key
def UpperCAmelCase ( _lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
if dtype == torch.bool:
return 1 / 8
SCREAMING_SNAKE_CASE__ : str = re.search(r"[^\d](\d+)$" , str(_lowerCamelCase ) )
if bit_search is None:
raise ValueError(f"""`dtype` is not a valid dtype: {dtype}.""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int(bit_search.groups()[0] )
return bit_size // 8
def UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : Any , _lowerCamelCase : Optional[int] , _lowerCamelCase : Any ):
'''simple docstring'''
if bloom_config_file == "":
SCREAMING_SNAKE_CASE__ : str = BloomConfig()
else:
SCREAMING_SNAKE_CASE__ : Dict = BloomConfig.from_json_file(_lowerCamelCase )
if shard_model:
SCREAMING_SNAKE_CASE__ : Dict = os.listdir(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Any = sorted(filter(lambda _lowerCamelCase : s.startswith("layer" ) and "model_00" in s , _lowerCamelCase ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = {"weight_map": {}, "metadata": {}}
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
SCREAMING_SNAKE_CASE__ : str = None
SCREAMING_SNAKE_CASE__ : Optional[Any] = BloomConfig()
for j, file in enumerate(_lowerCamelCase ):
print("Processing file: {}".format(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = None
for i in range(_lowerCamelCase ):
# load all TP files
SCREAMING_SNAKE_CASE__ : List[Any] = file.replace("model_00" , f"""model_0{i}""" )
SCREAMING_SNAKE_CASE__ : Tuple = torch.load(os.path.join(_lowerCamelCase , _lowerCamelCase ) , map_location="cpu" )
# Rename keys in the transformers names
SCREAMING_SNAKE_CASE__ : Optional[int] = list(temp.keys() )
for key in keys:
SCREAMING_SNAKE_CASE__ : Optional[Any] = temp.pop(_lowerCamelCase )
if tensors is None:
SCREAMING_SNAKE_CASE__ : Any = temp
else:
for key in tensors.keys():
if any(key.endswith(_lowerCamelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
SCREAMING_SNAKE_CASE__ : Dict = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights across TP ranks
SCREAMING_SNAKE_CASE__ : Any = torch.cat([tensors[key], temp[key]] , dim=_lowerCamelCase )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(_lowerCamelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
SCREAMING_SNAKE_CASE__ : List[str] = tensors[key] / pretraining_tp
torch.save(
_lowerCamelCase , os.path.join(
_lowerCamelCase , "pytorch_model_{}-of-{}.bin".format(str(j + 1 ).zfill(5 ) , str(len(_lowerCamelCase ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
SCREAMING_SNAKE_CASE__ : Dict = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
SCREAMING_SNAKE_CASE__ : List[str] = "pytorch_model_{}-of-{}.bin".format(
str(j + 1 ).zfill(5 ) , str(len(_lowerCamelCase ) ).zfill(5 ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = BloomConfig()
SCREAMING_SNAKE_CASE__ : Tuple = pytorch_dump_folder_path + "/" + CONFIG_NAME
SCREAMING_SNAKE_CASE__ : Optional[int] = total_size
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
with open(os.path.join(_lowerCamelCase , WEIGHTS_NAME + ".index.json" ) , "w" , encoding="utf-8" ) as f:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = json.dumps(_lowerCamelCase , indent=2 , sort_keys=_lowerCamelCase ) + "\n"
f.write(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE__ : List[str] = BloomModel(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = os.listdir(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : int = sorted(filter(lambda _lowerCamelCase : s.startswith("layer" ) and "model_00" in s , _lowerCamelCase ) )
SCREAMING_SNAKE_CASE__ : Tuple = None
for i, file in enumerate(_lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
for i in range(_lowerCamelCase ):
# load all TP files
SCREAMING_SNAKE_CASE__ : Tuple = file.replace("model_00" , f"""model_0{i}""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.load(os.path.join(_lowerCamelCase , _lowerCamelCase ) , map_location="cpu" )
# Rename keys in the transformers names
SCREAMING_SNAKE_CASE__ : Optional[Any] = list(temp.keys() )
for key in keys:
SCREAMING_SNAKE_CASE__ : List[Any] = temp.pop(_lowerCamelCase )
if tensors is None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = temp
else:
for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(_lowerCamelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
SCREAMING_SNAKE_CASE__ : str = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights across TP ranks
SCREAMING_SNAKE_CASE__ : Any = torch.cat([tensors[key], temp[key]] , dim=_lowerCamelCase )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(_lowerCamelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
SCREAMING_SNAKE_CASE__ : Dict = tensors[key] / pretraining_tp
SCREAMING_SNAKE_CASE__ : Dict = model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
assert not other_keys.unexpected_keys, f"""The keys {other_keys.unexpected_keys} are unexpected"""
if missing_keys is None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = set(other_keys.missing_keys )
else:
SCREAMING_SNAKE_CASE__ : str = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, f"""The keys {missing_keys} are missing"""
# Save pytorch-model
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Tuple = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
SCREAMING_SNAKE_CASE__ : int = pytorch_dump_folder_path + "/" + CONFIG_NAME
print(f"""Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}""" )
if config.torch_dtype is not None:
SCREAMING_SNAKE_CASE__ : List[Any] = model.to(config.torch_dtype )
torch.save(model.state_dict() , _lowerCamelCase )
print(f"""Save configuration file to {pytorch_config_dump_path}""" )
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__lowercase :Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bloom_checkpoint_path",
default=None,
type=str,
required=True,
help="Path to the Megatron-LM checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--bloom_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--shard_model",
action="store_true",
help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint",
)
parser.add_argument(
"--pretraining_tp",
default=4,
type=int,
help="Pretraining TP rank that has been used when training the model in Megatron-LM \n",
)
__lowercase :Dict = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
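# The key-renaming rule the script applies, in isolation. Embedding and final
# layer-norm keys map directly; for transformer blocks the layer index is
# parsed from the checkpoint *file name* and shifted by -3 because the first
# Megatron layers hold embeddings/norm. The example file name is hypothetical.
import re

def layer_name_mapping(key: str, file: str) -> str:
    rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }
    if key in rename_map:
        return rename_map[key]
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1]) - 3
    return f"h.{layer_number}." + key

assert layer_name_mapping("self_attention.dense.weight", "layer_04-model_00.pt") == "h.1.self_attention.dense.weight"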
| 720 |
from __future__ import annotations
from fractions import Fraction
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : int ):
'''simple docstring'''
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def UpperCAmelCase ( _lowerCamelCase : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = []
SCREAMING_SNAKE_CASE__ : str = 11
SCREAMING_SNAKE_CASE__ : Any = int("1" + "0" * digit_len )
    for num in range(den , last_digit ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num , den ):
solutions.append(f"""{num}/{den}""" )
den += 1
num += 1
SCREAMING_SNAKE_CASE__ : str = 10
return solutions
def UpperCAmelCase ( _lowerCamelCase : int = 2 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = 1.0
for fraction in fraction_list(_lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Any = Fraction(_lowerCamelCase )
result *= frac.denominator / frac.numerator
return int(_lowerCamelCase )
if __name__ == "__main__":
print(solution())
| 26 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowercase :Union[str, Any] = {
"configuration_whisper": ["WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", "WhisperConfig", "WhisperOnnxConfig"],
"feature_extraction_whisper": ["WhisperFeatureExtractor"],
"processing_whisper": ["WhisperProcessor"],
"tokenization_whisper": ["WhisperTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :Dict = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :str = [
"WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
"WhisperForConditionalGeneration",
"WhisperModel",
"WhisperPreTrainedModel",
"WhisperForAudioClassification",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :int = [
"TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFWhisperForConditionalGeneration",
"TFWhisperModel",
"TFWhisperPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :Optional[Any] = [
"FlaxWhisperForConditionalGeneration",
"FlaxWhisperModel",
"FlaxWhisperPreTrainedModel",
"FlaxWhisperForAudioClassification",
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
__lowercase :List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
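# The lazy-module pattern behind _LazyModule, in miniature: symbols listed in
# the import structure are resolved only on first attribute access, so
# importing the package stays cheap. A hedged sketch, not the transformers
# implementation itself:
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported symbol to the module that actually defines it.
        self._origin = {sym: mod for mod, syms in import_structure.items() for sym in syms}

    def __getattr__(self, name: str):
        # Called only when the attribute is not already present: import on demand.
        module = importlib.import_module(self._origin[name])
        return getattr(module, name)

lazy_math = TinyLazyModule("lazy_math", {"math": ["sqrt", "pi"]})
print(lazy_math.sqrt(2), lazy_math.pi)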
| 721 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
@require_torch
def A_ ( self : Dict ) ->str:
SCREAMING_SNAKE_CASE__ : Any = pipeline(
task="zero-shot-audio-classification" , model="hf-internal-testing/tiny-clap-htsat-unfused" )
SCREAMING_SNAKE_CASE__ : Optional[int] = load_dataset("ashraq/esc50" )
SCREAMING_SNAKE_CASE__ : Optional[int] = dataset["train"]["audio"][-1]["array"]
SCREAMING_SNAKE_CASE__ : int = audio_classifier(a , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(a ) , [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}] , )
@unittest.skip("No models are available in TF" )
def A_ ( self : int ) ->Union[str, Any]:
pass
@slow
@require_torch
def A_ ( self : int ) ->str:
SCREAMING_SNAKE_CASE__ : List[str] = pipeline(
task="zero-shot-audio-classification" , model="laion/clap-htsat-unfused" , )
# This is an audio of a dog
SCREAMING_SNAKE_CASE__ : int = load_dataset("ashraq/esc50" )
SCREAMING_SNAKE_CASE__ : str = dataset["train"]["audio"][-1]["array"]
SCREAMING_SNAKE_CASE__ : List[Any] = audio_classifier(a , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(a ) , [
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
] , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = audio_classifier([audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(a ) , [
[
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
],
]
* 5 , )
SCREAMING_SNAKE_CASE__ : int = audio_classifier(
[audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] , batch_size=5 )
self.assertEqual(
nested_simplify(a ) , [
[
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
],
]
* 5 , )
@unittest.skip("No models are available in TF" )
def A_ ( self : Optional[int] ) ->Union[str, Any]:
pass
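# Plain (non-test) usage of the pipeline exercised above, with the same model
# and dataset names; assumes transformers, datasets, and a torch backend are
# installed:
from datasets import load_dataset
from transformers import pipeline

audio_classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
audio = load_dataset("ashraq/esc50")["train"]["audio"][-1]["array"]
print(audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"]))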
| 26 | 0 |
import numpy as np
def UpperCAmelCase ( _lowerCamelCase : Dict , _lowerCamelCase : Any ):
'''simple docstring'''
    return np.where(vector > 0 , vector , (alpha * (np.exp(vector ) - 1)) )
if __name__ == "__main__":
import doctest
doctest.testmod()
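# The function above is the exponential linear unit (ELU): identity for
# positive inputs, alpha * (exp(x) - 1) otherwise. With descriptive names:
import numpy as np

def exponential_linear_unit(vector: np.ndarray, alpha: float = 1.0) -> np.ndarray:
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))

print(exponential_linear_unit(np.array([-2.0, 0.0, 3.0])))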
| 700 |
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
__lowercase :List[str] = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
__lowercase :str = get_tests_dir("fixtures/vocab.json")
__lowercase :Optional[int] = get_tests_dir("fixtures")
class _a ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def A_ ( self : Optional[Any] ) ->int:
SCREAMING_SNAKE_CASE__ : Dict = 0
def A_ ( self : Any ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
self.assertIsInstance(a , a )
def A_ ( self : Union[str, Any] ) ->List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Dict = WavaVecaConfig()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
# save in new folder
model_config.save_pretrained(a )
processor.save_pretrained(a )
SCREAMING_SNAKE_CASE__ : str = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : int ) ->List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(a , os.path.join(a , a ) )
copyfile(a , os.path.join(a , "vocab.json" ) )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : List[Any] ) ->Tuple:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Optional[Any] = WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE__ : Tuple = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
SCREAMING_SNAKE_CASE__ : Any = WavaVecaProcessor(a , a )
# save in new folder
processor.save_pretrained(a )
# drop `processor_class` in tokenizer
with open(os.path.join(a , a ) , "r" ) as f:
SCREAMING_SNAKE_CASE__ : Optional[int] = json.load(a )
config_dict.pop("processor_class" )
with open(os.path.join(a , a ) , "w" ) as f:
f.write(json.dumps(a ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : List[str] ) ->Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Tuple = WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
SCREAMING_SNAKE_CASE__ : Optional[int] = WavaVecaProcessor(a , a )
# save in new folder
processor.save_pretrained(a )
# drop `processor_class` in feature extractor
with open(os.path.join(a , a ) , "r" ) as f:
SCREAMING_SNAKE_CASE__ : List[Any] = json.load(a )
config_dict.pop("processor_class" )
with open(os.path.join(a , a ) , "w" ) as f:
f.write(json.dumps(a ) )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : Union[str, Any] ) ->str:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : List[Any] = WavaVecaConfig(processor_class="Wav2Vec2Processor" )
model_config.save_pretrained(a )
# copy relevant files
copyfile(a , os.path.join(a , "vocab.json" ) )
            # create empty sample processor
with open(os.path.join(a , a ) , "w" ) as f:
f.write("{}" )
SCREAMING_SNAKE_CASE__ : Tuple = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : Optional[Any] ) ->Optional[int]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(a ):
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(a ):
SCREAMING_SNAKE_CASE__ : Any = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
SCREAMING_SNAKE_CASE__ : Dict = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
SCREAMING_SNAKE_CASE__ : int = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a , use_fast=a )
SCREAMING_SNAKE_CASE__ : List[Any] = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , "NewTokenizer" )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
def A_ ( self : Tuple ) ->List[Any]:
try:
AutoConfig.register("custom" , a )
AutoFeatureExtractor.register(a , a )
AutoTokenizer.register(a , slow_tokenizer_class=a )
AutoProcessor.register(a , a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(a ):
AutoProcessor.register(a , a )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE__ : List[str] = CustomFeatureExtractor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ : int = os.path.join(a , "vocab.txt" )
with open(a , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = CustomTokenizer(a )
SCREAMING_SNAKE_CASE__ : List[Any] = CustomProcessor(a , a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(a )
SCREAMING_SNAKE_CASE__ : Any = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def A_ ( self : Union[str, Any] ) ->int:
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = False
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = False
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "AutoFeatureExtractor"
snake_case_ = "AutoTokenizer"
snake_case_ = False
try:
AutoConfig.register("custom" , a )
AutoFeatureExtractor.register(a , a )
AutoTokenizer.register(a , slow_tokenizer_class=a )
AutoProcessor.register(a , a )
# If remote code is not set, the default is to use local classes.
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
SCREAMING_SNAKE_CASE__ : Tuple = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
SCREAMING_SNAKE_CASE__ : Any = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def A_ ( self : Optional[Any] ) ->Dict:
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(processor.__class__.__name__ , "BertTokenizerFast" )
def A_ ( self : Dict ) ->Union[str, Any]:
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext" )
self.assertEqual(processor.__class__.__name__ , "ConvNextImageProcessor" )
@is_staging_test
class ProcessorPushToHubTester(unittest.TestCase ):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
    def setUpClass(cls ):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
    def tearDownClass(cls ):
try:
delete_repo(token=cls._token , repo_id="test-processor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-processor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-processor" )
except HTTPError:
pass
def A_ ( self : Dict ) ->Dict:
        # `SAMPLE_PROCESSOR_CONFIG_DIR` is assumed to be the fixtures directory defined near the top of this test file
        processor = WavaVecaProcessor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR )
        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir , "test-processor" ) , push_to_hub=True , use_auth_token=self._token )
            new_processor = WavaVecaProcessor.from_pretrained(f"""{USER}/test-processor""" )
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v , getattr(new_processor.feature_extractor , k ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def A_ ( self : List[str] ) ->Union[str, Any]:
        processor = WavaVecaProcessor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR )
        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir , "test-processor-org" ) , push_to_hub=True , use_auth_token=self._token , organization="valid_org" , )
            new_processor = WavaVecaProcessor.from_pretrained("valid_org/test-processor-org" )
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v , getattr(new_processor.feature_extractor , k ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def A_ ( self : Any ) ->int:
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR )
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir , "vocab.txt" )
            with open(vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            tokenizer = CustomTokenizer(vocab_file )
            processor = CustomProcessor(feature_extractor , tokenizer )
        with tempfile.TemporaryDirectory() as tmp_dir:
            create_repo(f"""{USER}/test-dynamic-processor""" , token=self._token )
            repo = Repository(tmp_dir , clone_from=f"""{USER}/test-dynamic-processor""" , token=self._token )
            processor.save_pretrained(tmp_dir )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# This has added the proper auto_map field to the tokenizer config
        with open(os.path.join(tmp_dir , "tokenizer_config.json" ) ) as f:
            tokenizer_config = json.load(f )
self.assertDictEqual(
tokenizer_config["auto_map"] , {
"AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# The code has been copied from fixtures
        self.assertTrue(os.path.isfile(os.path.join(tmp_dir , "custom_feature_extraction.py" ) ) )
        self.assertTrue(os.path.isfile(os.path.join(tmp_dir , "custom_tokenization.py" ) ) )
        self.assertTrue(os.path.isfile(os.path.join(tmp_dir , "custom_processing.py" ) ) )
repo.push_to_hub()
        new_processor = AutoProcessor.from_pretrained(f"""{USER}/test-dynamic-processor""" , trust_remote_code=True )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , "CustomProcessor" )
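
# A minimal, self-contained sketch of the registration flow exercised above.
# `CustomConfig`/`CustomFeatureExtractor`/`CustomTokenizer`/`CustomProcessor`
# mirror the fixture classes imported at the top of this test file, and
# `checkpoint_dir` is a hypothetical local checkpoint directory.
def _register_custom_processor_example(checkpoint_dir):
    AutoConfig.register("custom", CustomConfig)
    AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
    AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
    AutoProcessor.register(CustomConfig, CustomProcessor)
    # Once registered, the auto-API resolves the custom classes like built-ins.
    return AutoProcessor.from_pretrained(checkpoint_dir)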
| 26 | 0 |
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    """Difference between the hypothesis value and the actual output."""
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set
    )


def _hypothesis_value(data_input_tuple):
    """Linear hypothesis h(x) = theta_0 + theta_1*x_1 + ... + theta_n*x_n."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
run_gradient_descent()
print("\nTesting gradient descent for a linear hypothesis function.\n")
test_gradient_descent()
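
# For contrast, a vectorized sketch of the same batch update using the `numpy`
# import above. `theta`, `features` and `targets` are illustrative names local
# to this helper, not names used by the module above.
def _vectorized_gradient_step(theta, features, targets, learning_rate=LEARNING_RATE):
    # Prepend a bias column of ones so theta[0] acts as the intercept.
    x = numpy.hstack([numpy.ones((features.shape[0], 1)), features])
    errors = x @ theta - targets  # h(x) - y for every example at once
    gradient = x.T @ errors / len(targets)  # averaged partial derivatives
    return theta - learning_rate * gradient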
| 701 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
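
# Usage sketch (a hedged example, not executed here): the checkpoint name is a
# public CLIP checkpoint and is only illustrative.
def _clip_processor_example():
    from PIL import Image

    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = Image.new("RGB", (224, 224))  # stand-in for a real photo
    # One call tokenizes the text and preprocesses the image together.
    return processor(text=["a photo of a cat"], images=image, return_tensors="pt")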
| 26 | 0 |
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str) -> None:
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Index of the rightmost occurrence of `char` in the pattern, or -1."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Position of the last mismatch for the alignment at `current_pos`,
        or -1 if the pattern matches there."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print("No match found")
else:
print("Pattern found in following positions: ")
print(positions)
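
# Sanity-check sketch: the heuristic above should agree with a brute-force
# scan built on plain slicing.
def _brute_force_positions(text: str, pattern: str) -> list[int]:
    return [
        i
        for i in range(len(text) - len(pattern) + 1)
        if text[i : i + len(pattern)] == pattern
    ]


# For text="ABAABA" and pattern="AB", both approaches yield [0, 3].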
| 702 |
import sys
from collections import defaultdict
class Heap:
    def __init__(self) -> None:
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        """Sift the element at `start` down until the min-heap property holds."""
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    # Update function if value of any node in min-heap decreases
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    """Return the edges of a minimum spanning tree found with Prim's algorithm."""
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
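
# Non-interactive sketch of the function above on a 3-vertex triangle graph
# (weights: 0-1 -> 1, 1-2 -> 2, 0-2 -> 3). The MST keeps the two cheapest
# edges, so this helper should return [(0, 1), (1, 2)].
def _prims_example():
    example_graph = defaultdict(list)
    for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 3)]:
        example_graph[u].append([v, w])
        example_graph[v].append([u, w])
    return prisms_algorithm(example_graph)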
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 26 | 0 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(root: TreeNode | None) -> bool:
    # Validation
    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True
        if not isinstance(node, TreeNode):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(root):
        raise ValueError(
            "Each node should be type of TreeNode and data should be float." )

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(
                node.right, node.data, right_bound
            )
        )

    return is_binary_search_tree_recursive_check(root, -float("inf"), float("inf"))
if __name__ == "__main__":
import doctest
doctest.testmod()
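
# Small sketch exercising the validator: the first tree satisfies the BST
# ordering, the second violates it by swapping the children.
def _bst_example() -> tuple[bool, bool]:
    valid = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
    invalid = TreeNode(2.0, TreeNode(3.0), TreeNode(1.0))
    return is_binary_search_tree(valid), is_binary_search_tree(invalid)  # (True, False)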
| 703 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class LongformerConfig(PretrainedConfig):
    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export


class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ] )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        preprocessor: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
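
# Short usage sketch: `attention_window` may be a single int or one entry per
# layer; the per-layer form must match `num_hidden_layers` (12 by default).
def _longformer_config_example() -> LongformerConfig:
    config = LongformerConfig(attention_window=[256] * 12)
    assert len(config.attention_window) == config.num_hidden_layers
    return config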
| 26 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase ):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False ):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image ):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w )
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h )
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values, key=lambda item: item[0] )[0]
            expected_width = max(expected_values, key=lambda item: item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase ):
    image_processing_class = DetaImageProcessor if is_vision_available() else None
    def setUp(self ):
        self.image_processor_tester = DetaImageProcessingTester(self )
@property
    def image_processor_dict(self ):
        return self.image_processor_tester.prepare_image_processor_dict()
def A_ ( self : Union[str, Any] ) ->str:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , "image_mean" ) )
self.assertTrue(hasattr(A_ , "image_std" ) )
self.assertTrue(hasattr(A_ , "do_normalize" ) )
self.assertTrue(hasattr(A_ , "do_resize" ) )
self.assertTrue(hasattr(A_ , "do_rescale" ) )
self.assertTrue(hasattr(A_ , "do_pad" ) )
self.assertTrue(hasattr(A_ , "size" ) )
def A_ ( self : Optional[Any] ) ->List[str]:
SCREAMING_SNAKE_CASE__ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} )
self.assertEqual(image_processor.do_pad , A_ )
def A_ ( self : List[Any] ) ->List[Any]:
pass
def A_ ( self : List[Any] ) ->List[Any]:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
SCREAMING_SNAKE_CASE__ : Any = image_processing(A_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : List[Any] ) ->Any:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Dict = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : List[Any] = image_processing(A_ , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : List[str] ) ->Any:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : str = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Tuple = image_processing(A_ , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def A_ ( self : List[Any] ) ->int:
# prepare image and target
SCREAMING_SNAKE_CASE__ : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = json.loads(f.read() )
SCREAMING_SNAKE_CASE__ : Tuple = {"image_id": 3_97_69, "annotations": target}
# encode them
SCREAMING_SNAKE_CASE__ : Optional[int] = DetaImageProcessor()
SCREAMING_SNAKE_CASE__ : Dict = image_processing(images=A_ , annotations=A_ , return_tensors="pt" )
# verify pixel values
SCREAMING_SNAKE_CASE__ : Tuple = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , A_ )
SCREAMING_SNAKE_CASE__ : Any = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , A_ , atol=1E-4 ) )
# verify area
SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , A_ ) )
# verify boxes
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , A_ )
SCREAMING_SNAKE_CASE__ : List[Any] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , A_ , atol=1E-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE__ : int = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , A_ ) )
# verify is_crowd
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , A_ ) )
# verify class_labels
SCREAMING_SNAKE_CASE__ : Any = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , A_ ) )
# verify orig_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , A_ ) )
# verify size
SCREAMING_SNAKE_CASE__ : List[Any] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , A_ ) )
@slow
def A_ ( self : Optional[Any] ) ->str:
# prepare image, target and masks_path
SCREAMING_SNAKE_CASE__ : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
SCREAMING_SNAKE_CASE__ : str = json.loads(f.read() )
SCREAMING_SNAKE_CASE__ : Any = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
SCREAMING_SNAKE_CASE__ : List[str] = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
SCREAMING_SNAKE_CASE__ : List[Any] = DetaImageProcessor(format="coco_panoptic" )
SCREAMING_SNAKE_CASE__ : str = image_processing(images=A_ , annotations=A_ , masks_path=A_ , return_tensors="pt" )
# verify pixel values
SCREAMING_SNAKE_CASE__ : Dict = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , A_ )
SCREAMING_SNAKE_CASE__ : List[Any] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , A_ , atol=1E-4 ) )
# verify area
SCREAMING_SNAKE_CASE__ : List[Any] = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , A_ ) )
# verify boxes
SCREAMING_SNAKE_CASE__ : str = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , A_ )
SCREAMING_SNAKE_CASE__ : str = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , A_ , atol=1E-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , A_ ) )
# verify is_crowd
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , A_ ) )
# verify class_labels
SCREAMING_SNAKE_CASE__ : Tuple = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , A_ ) )
# verify masks
SCREAMING_SNAKE_CASE__ : int = 82_28_73
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , A_ )
# verify orig_size
SCREAMING_SNAKE_CASE__ : int = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , A_ ) )
# verify size
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , A_ ) )
| 704 |
def solution(n: int = 4_000_000) -> int:
    """Sum of the even-valued Fibonacci terms that do not exceed `n`."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
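
# Alternative sketch using the identity E(k) = 4 * E(k - 1) + E(k - 2): every
# third Fibonacci number is even, so we can iterate over the even terms only.
def solution_even_terms_only(n: int = 4_000_000) -> int:
    a, b = 2, 8  # the first two even Fibonacci numbers
    total = 0
    while a <= n:
        total += a
        a, b = b, 4 * b + a
    return total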
if __name__ == "__main__":
print(f"{solution() = }")
| 26 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"
class UperNetConvModule(nn.Module):
    """A convolutional block bundling conv, batch-norm and ReLU layers."""

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        padding: Union[int, Tuple[int, int], str] = 0,
        bias: bool = False,
        dilation: Union[int, Tuple[int, int]] = 1,
    ) -> None:
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output
class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class _a ( nn.Module ):
"""simple docstring"""
def __init__( self : Dict , a : Tuple[int, ...] , a : int , a : int , a : bool ) ->None:
super().__init__()
SCREAMING_SNAKE_CASE__ : Optional[int] = pool_scales
SCREAMING_SNAKE_CASE__ : Optional[Any] = align_corners
SCREAMING_SNAKE_CASE__ : List[str] = in_channels
SCREAMING_SNAKE_CASE__ : Union[str, Any] = channels
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
for i, pool_scale in enumerate(a ):
SCREAMING_SNAKE_CASE__ : str = UperNetPyramidPoolingBlock(pool_scale=a , in_channels=a , channels=a )
self.blocks.append(a )
self.add_module(str(a ) , a )
def A_ ( self : List[str] , a : torch.Tensor ) ->List[torch.Tensor]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
for ppm in self.blocks:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ppm(a )
SCREAMING_SNAKE_CASE__ : str = nn.functional.interpolate(
a , size=x.size()[2:] , mode="bilinear" , align_corners=self.align_corners )
ppm_outs.append(a )
return ppm_outs
class _a ( nn.Module ):
"""simple docstring"""
def __init__( self : List[Any] , a : Any , a : Optional[Any] ) ->int:
super().__init__()
SCREAMING_SNAKE_CASE__ : Optional[int] = config
SCREAMING_SNAKE_CASE__ : str = config.pool_scales # e.g. (1, 2, 3, 6)
SCREAMING_SNAKE_CASE__ : Optional[int] = in_channels
SCREAMING_SNAKE_CASE__ : List[str] = config.hidden_size
SCREAMING_SNAKE_CASE__ : int = False
SCREAMING_SNAKE_CASE__ : str = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
# PSP Module
SCREAMING_SNAKE_CASE__ : List[Any] = UperNetPyramidPoolingModule(
self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
SCREAMING_SNAKE_CASE__ : Tuple = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
# FPN Module
SCREAMING_SNAKE_CASE__ : Any = nn.ModuleList()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
SCREAMING_SNAKE_CASE__ : Tuple = UperNetConvModule(a , self.channels , kernel_size=1 )
SCREAMING_SNAKE_CASE__ : List[Any] = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
self.lateral_convs.append(a )
self.fpn_convs.append(a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = UperNetConvModule(
len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
def A_ ( self : Tuple ) ->List[str]:
self.apply(self._init_weights )
def A_ ( self : str , a : int ) ->Tuple:
if isinstance(a , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def A_ ( self : List[str] , a : List[str] ) ->int:
SCREAMING_SNAKE_CASE__ : Optional[int] = inputs[-1]
SCREAMING_SNAKE_CASE__ : Tuple = [x]
psp_outs.extend(self.psp_modules(a ) )
SCREAMING_SNAKE_CASE__ : List[str] = torch.cat(a , dim=1 )
SCREAMING_SNAKE_CASE__ : Any = self.bottleneck(a )
return output
def A_ ( self : List[Any] , a : torch.Tensor ) ->torch.Tensor:
# build laterals
SCREAMING_SNAKE_CASE__ : List[str] = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(a ) )
# build top-down path
SCREAMING_SNAKE_CASE__ : str = len(a )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
SCREAMING_SNAKE_CASE__ : str = laterals[i - 1].shape[2:]
SCREAMING_SNAKE_CASE__ : int = laterals[i - 1] + nn.functional.interpolate(
laterals[i] , size=a , mode="bilinear" , align_corners=self.align_corners )
# build outputs
SCREAMING_SNAKE_CASE__ : int = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
SCREAMING_SNAKE_CASE__ : Tuple = nn.functional.interpolate(
fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode="bilinear" , align_corners=self.align_corners )
SCREAMING_SNAKE_CASE__ : Any = torch.cat(a , dim=1 )
SCREAMING_SNAKE_CASE__ : int = self.fpn_bottleneck(a )
SCREAMING_SNAKE_CASE__ : int = self.classifier(a )
return output
class _a ( nn.Module ):
"""simple docstring"""
def __init__( self : Any , a : Any , a : int = 2 , a : int = 3 , a : Union[int, Tuple[int, int]] = 1 ) ->None:
super().__init__()
SCREAMING_SNAKE_CASE__ : int = config
SCREAMING_SNAKE_CASE__ : Optional[Any] = config.auxiliary_in_channels
SCREAMING_SNAKE_CASE__ : Union[str, Any] = config.auxiliary_channels
SCREAMING_SNAKE_CASE__ : Optional[int] = config.auxiliary_num_convs
SCREAMING_SNAKE_CASE__ : Optional[Any] = config.auxiliary_concat_input
SCREAMING_SNAKE_CASE__ : str = in_index
SCREAMING_SNAKE_CASE__ : List[Any] = (kernel_size // 2) * dilation
SCREAMING_SNAKE_CASE__ : Any = []
convs.append(
UperNetConvModule(
self.in_channels , self.channels , kernel_size=a , padding=a , dilation=a ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels , self.channels , kernel_size=a , padding=a , dilation=a ) )
if self.num_convs == 0:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = nn.Identity()
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = nn.Sequential(*a )
if self.concat_input:
SCREAMING_SNAKE_CASE__ : Any = UperNetConvModule(
self.in_channels + self.channels , self.channels , kernel_size=a , padding=kernel_size // 2 )
SCREAMING_SNAKE_CASE__ : Tuple = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
def A_ ( self : List[Any] ) ->List[str]:
self.apply(self._init_weights )
def A_ ( self : Optional[int] , a : Dict ) ->List[str]:
if isinstance(a , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def A_ ( self : Union[str, Any] , a : torch.Tensor ) ->torch.Tensor:
# just take the relevant feature maps
SCREAMING_SNAKE_CASE__ : str = encoder_hidden_states[self.in_index]
SCREAMING_SNAKE_CASE__ : Optional[int] = self.convs(a )
if self.concat_input:
SCREAMING_SNAKE_CASE__ : int = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
SCREAMING_SNAKE_CASE__ : str = self.classifier(a )
return output
class _a ( __lowercase ):
"""simple docstring"""
snake_case_ = UperNetConfig
snake_case_ = "pixel_values"
snake_case_ = True
def A_ ( self : Tuple , a : List[Any] ) ->Optional[Any]:
if isinstance(a , a ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def A_ ( self : Dict ) ->Any:
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def A_ ( self : List[str] , a : List[Any] , a : Optional[Any]=False ) ->Union[str, Any]:
if isinstance(a , a ):
SCREAMING_SNAKE_CASE__ : Tuple = value
UPERNET_START_DOCSTRING = """
    Parameters:
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.
    config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
        Initializing with a config file does not load the weights associated with the model, only the
        configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

UPERNET_INPUTS_DOCSTRING = """
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
            `attentions` under returned tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
            returned tensors for more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.",
    UPERNET_START_DOCSTRING,
)
class _a ( __lowercase ):
"""simple docstring"""
def __init__( self : int , a : Tuple ) ->List[str]:
super().__init__(a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoBackbone.from_config(config.backbone_config )
# Semantic segmentation head(s)
SCREAMING_SNAKE_CASE__ : int = UperNetHead(a , in_channels=self.backbone.channels )
SCREAMING_SNAKE_CASE__ : Any = UperNetFCNHead(a ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length" ) )
@replace_return_docstrings(output_type=a , config_class=_CONFIG_FOR_DOC )
def A_ ( self : Any , a : Optional[torch.Tensor] = None , a : Optional[bool] = None , a : Optional[bool] = None , a : Optional[torch.Tensor] = None , a : Optional[bool] = None , ) ->Union[tuple, SemanticSegmenterOutput]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE__ : Tuple = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = output_attentions if output_attentions is not None else self.config.output_attentions
SCREAMING_SNAKE_CASE__ : int = self.backbone.forward_with_filtered_kwargs(
a , output_hidden_states=a , output_attentions=a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = outputs.feature_maps
SCREAMING_SNAKE_CASE__ : str = self.decode_head(a )
SCREAMING_SNAKE_CASE__ : List[str] = nn.functional.interpolate(a , size=pixel_values.shape[2:] , mode="bilinear" , align_corners=a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
if self.auxiliary_head is not None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.auxiliary_head(a )
SCREAMING_SNAKE_CASE__ : List[str] = nn.functional.interpolate(
a , size=pixel_values.shape[2:] , mode="bilinear" , align_corners=a )
SCREAMING_SNAKE_CASE__ : int = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError("The number of labels should be greater than one" )
else:
# compute weighted loss
SCREAMING_SNAKE_CASE__ : Any = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
SCREAMING_SNAKE_CASE__ : Optional[Any] = loss_fct(a , a )
SCREAMING_SNAKE_CASE__ : Optional[int] = loss_fct(a , a )
SCREAMING_SNAKE_CASE__ : Tuple = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
SCREAMING_SNAKE_CASE__ : Tuple = (logits,) + outputs[1:]
else:
SCREAMING_SNAKE_CASE__ : Tuple = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=a , logits=a , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
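
# Inference sketch (assumes a transformers release that exports the class
# above as `UperNetForSemanticSegmentation`; the checkpoint is the one listed
# in the archive map at the top of this file).
def _upernet_inference_example():
    from transformers import UperNetForSemanticSegmentation

    model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
    pixel_values = torch.zeros(1, 3, 512, 512)  # stand-in for a preprocessed image
    with torch.no_grad():
        logits = model(pixel_values).logits  # (batch, num_labels, height, width)
    return logits.shape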
| 705 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase ):
    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
    def prepare_image_processor_dict(self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
    def get_expected_values(self, image_inputs, batched=False ):
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image ):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h )
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size )
            if max(newh, neww ) > max_size:
                scale = max_size / max(newh, neww )
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5 ), int(neww + 0.5 )
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values, key=lambda item: item[0] )[0]
            expected_width = max(expected_values, key=lambda item: item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase ):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None
    def setUp(self ):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self )
@property
    def image_processor_dict(self ):
        return self.image_processor_tester.prepare_image_processor_dict()
def A_ ( self : Tuple ) ->str:
SCREAMING_SNAKE_CASE__ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , "image_mean" ) )
self.assertTrue(hasattr(a , "image_std" ) )
self.assertTrue(hasattr(a , "do_normalize" ) )
self.assertTrue(hasattr(a , "do_resize" ) )
self.assertTrue(hasattr(a , "size" ) )
self.assertTrue(hasattr(a , "size_divisor" ) )
def A_ ( self : List[Any] ) ->List[Any]:
pass
def A_ ( self : Tuple ) ->Optional[Any]:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : int = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : Optional[int] ) ->Any:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Tuple = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : str ) ->Optional[int]:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Any = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
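
# Tiny standalone sketch of the `size_divisor` rounding rule checked above:
# after resizing, both sides are floored to the nearest multiple of the
# divisor, e.g. (290, 413) with divisor 32 becomes (288, 384).
def _round_to_divisor_example(height: int = 290, width: int = 413, size_divisor: int = 32) -> tuple:
    return (height // size_divisor * size_divisor, width // size_divisor * size_divisor)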
| 26 | 0 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase ):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self ):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab ) ) ) )
        save_dir = Path(self.tmpdirname )
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"] )
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"] )
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"] )
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"] )

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_tokenizer( self , **kwargs ) ->MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
return (
"This is a test",
"This is a test",
)
    def test_convert_token_and_id( self ):
        token = "</s>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "</s>" )
        self.assertEqual(vocab_keys[1] , "<unk>" )
        self.assertEqual(vocab_keys[-1] , "<pad>" )
        self.assertEqual(len(vocab_keys ) , 9 )
    def test_vocab_size( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 9 )
    def test_tokenizer_equivalence_en_de( self ):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"""{ORG_NAME}opus-mt-en-de""" )
        batch = en_de_tokenizer(["I am a small frog"] , return_tensors=None )
        self.assertIsInstance(batch , BatchEncoding )
        expected = [38, 1_21, 14, 6_97, 3_88_48, 0]
        self.assertListEqual(expected , batch.input_ids[0] )
        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir )
        contents = [x.name for x in Path(save_dir ).glob("*" )]
        self.assertIn("source.spm" , contents )
        MarianTokenizer.from_pretrained(save_dir )
    def test_outputs_not_longer_than_maxlen( self ):
        tok = self.get_tokenizer()
        batch = tok(
            ["I am a small frog" * 10_00, "I am a small frog"] , padding=True , truncation=True , return_tensors=FRAMEWORK )
        self.assertIsInstance(batch , BatchEncoding )
        self.assertEqual(batch.input_ids.shape , (2, 5_12) )
    def test_outputs_can_be_shorter( self ):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"] , padding=True , return_tensors=FRAMEWORK )
        self.assertIsInstance(batch_smaller , BatchEncoding )
        self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
    @slow
    def test_tokenizer_integration( self ):
# fmt: off
SCREAMING_SNAKE_CASE__ : str = {"input_ids": [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=SCREAMING_SNAKE_CASE__ , model_name="Helsinki-NLP/opus-mt-en-de" , revision="1a8c2263da11e68e50938f97e10cd57820bd504c" , decode_kwargs={"use_source_tokenizer": True} , )
    def test_tokenizer_integration_seperate_vocabs( self ):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs" )
        source_text = "Tämä on testi"
        target_text = "This is a test"
        expected_src_ids = [76, 7, 20_47, 2]
        expected_target_ids = [69, 12, 11, 9_40, 2]
        src_ids = tokenizer(source_text ).input_ids
        self.assertListEqual(expected_src_ids , src_ids )
        target_ids = tokenizer(text_target=target_text ).input_ids
        self.assertListEqual(expected_target_ids , target_ids )
        decoded = tokenizer.decode(target_ids , skip_special_tokens=True )
        self.assertEqual(decoded , target_text )
def miller_rabin ( n : int , allow_probable : bool = False ):
'''simple docstring'''
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
raise ValueError(
"Warning: upper bound of deterministic test is exceeded. "
"Pass allow_probable=True to allow probabilistic test. "
"A return value of True indicates a probable prime." )
# array bounds provided by analysis
    bounds = [
2_047,
1_373_653,
25_326_001,
3_215_031_751,
2_152_302_898_747,
3_474_749_660_383,
341_550_071_728_321,
1,
3_825_123_056_546_413_051,
1,
1,
318_665_857_834_031_151_167_461,
3_317_044_064_679_887_385_961_981,
]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds , 1 ):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
    for prime in plist:
        pr = False
        for r in range(s ):
            m = pow(prime , d * 2**r , n )
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
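# A minimal usage sketch (the helper name below is illustrative, not part of the module):
# for n below the last bound above, the bases in `primes` give a fully deterministic
# answer, so no `allow_probable` flag is needed for these calls.
def _demo_miller_rabin() -> None:
    assert miller_rabin(97 )  # prime
    assert not miller_rabin(91 )  # 7 * 13, correctly rejected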
def test_miller_rabin ( ):
'''simple docstring'''
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838_201 )
assert miller_rabin(838_207 )
# 1_373_653
assert not miller_rabin(17_316_001 )
assert miller_rabin(17_316_017 )
# 25_326_001
assert not miller_rabin(3_078_386_641 )
assert miller_rabin(3_078_386_653 )
# 3_215_031_751
assert not miller_rabin(1_713_045_574_801 )
assert miller_rabin(1_713_045_574_819 )
# 2_152_302_898_747
assert not miller_rabin(2_779_799_728_307 )
assert miller_rabin(2_779_799_728_327 )
# 3_474_749_660_383
assert not miller_rabin(113_850_023_909_441 )
assert miller_rabin(113_850_023_909_527 )
# 341_550_071_728_321
assert not miller_rabin(1_275_041_018_848_804_351 )
assert miller_rabin(1_275_041_018_848_804_391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79_666_464_458_507_787_791_867 )
assert miller_rabin(79_666_464_458_507_787_791_951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552_840_677_446_647_897_660_333 )
assert miller_rabin(552_840_677_446_647_897_660_359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_addoption ( parser ):
    '''simple docstring'''
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary ( terminalreporter ):
    '''simple docstring'''
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption("--make-reports" )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
import numpy
class TwoHiddenLayerNeuralNetwork :
    """simple docstring"""
    def __init__( self , input_array : numpy.ndarray , output_array : numpy.ndarray ) ->None:
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # Random initial weights are assigned.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1] , 4 )
        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(
            4 , 3 )
        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3 , 1 )
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape )
    def feedforward( self ) ->numpy.ndarray:
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
        return self.layer_between_second_hidden_layer_and_output
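    # Shape sketch for the forward pass (assuming n training rows and the 3-feature
    # inputs used in example() below): input (n, 3) @ weights (3, 4) -> hidden1 (n, 4);
    # hidden1 (n, 4) @ weights (4, 3) -> hidden2 (n, 3); hidden2 (n, 3) @ weights (3, 1)
    # -> output (n, 1), with the element-wise sigmoid applied after each product.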
    def back_propagation( self ) ->None:
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output ) , )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T , numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )
    def train( self , output : numpy.ndarray , iterations : int , give_loss : bool ) ->None:
        for iteration in range(1 , iterations + 1 ):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward() ) )
                print(f"""Iteration {iteration} Loss: {loss}""" )
    def predict( self , input_arr : numpy.ndarray ) ->int:
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def sigmoid ( value : numpy.ndarray ):
    '''simple docstring'''
    return 1 / (1 + numpy.exp(-value ))
def sigmoid_derivative ( value : numpy.ndarray ):
    '''simple docstring'''
    return (value) * (1 - (value))
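# Note: sigmoid_derivative expects an already-activated value s = sigmoid(x), since
# d/dx sigmoid(x) = s * (1 - s). A quick finite-difference sanity check (illustrative
# helper, not called anywhere in this module):
def _check_sigmoid_derivative(x: float = 0.5, eps: float = 1e-6) -> bool:
    s = sigmoid(numpy.array(x ))
    numeric = (sigmoid(numpy.array(x + eps )) - s) / eps
    return bool(abs(numeric - sigmoid_derivative(s )) < 1e-4)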
def example ( ):
    '''simple docstring'''
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ) , dtype=numpy.float64 , )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.float64 )
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(
        input_array=test_input , output_array=output )
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output , iterations=10 , give_loss=False )
    return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.float64 ) )
if __name__ == "__main__":
example()
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def get_resize_output_image_size ( input_image : np.ndarray , output_size : Union[int, Iterable[int]] , keep_aspect_ratio : bool , multiple : int ):
    '''simple docstring'''
    def constraint_to_multiple_of(val , multiple , min_val=0 , max_val=None ):
        x = round(val / multiple ) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple ) * multiple
        if x < min_val:
            x = math.ceil(val / multiple ) * multiple
        return x
    output_size = (output_size, output_size) if isinstance(output_size , int ) else output_size
    input_height, input_width = get_image_size(input_image )
    output_height, output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width ) < abs(1 - scale_height ):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height , multiple=multiple )
    new_width = constraint_to_multiple_of(scale_width * input_width , multiple=multiple )
    return (new_height, new_width)
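# Worked example (numbers chosen for illustration): a 480x640 input resized toward
# (384, 384) with keep_aspect_ratio=True and multiple=32 fits the height, because
# scale 384/480 = 0.8 is closer to 1 than 384/640 = 0.6; both dimensions then scale
# by 0.8, so get_resize_output_image_size(image, (384, 384), True, 32) == (384, 512).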
class _a ( BaseImageProcessor ):
    """simple docstring"""
    model_input_names = ["pixel_values"]
    def __init__( self , do_resize : bool = True , size : Dict[str, int] = None , resample : PILImageResampling = PILImageResampling.BILINEAR , keep_aspect_ratio : bool = False , ensure_multiple_of : int = 1 , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 2_55 , do_normalize : bool = True , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , **kwargs , ) ->None:
        super().__init__(**kwargs )
        size = size if size is not None else {"height": 3_84, "width": 3_84}
        size = get_size_dict(size )
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image : np.ndarray , size : Dict[str, int] , keep_aspect_ratio : bool = False , ensure_multiple_of : int = 1 , resample : PILImageResampling = PILImageResampling.BICUBIC , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) ->np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
        output_size = get_resize_output_image_size(
            image , output_size=(size["height"], size["width"]) , keep_aspect_ratio=keep_aspect_ratio , multiple=ensure_multiple_of , )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def rescale( self , image : np.ndarray , scale : Union[int, float] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) ->np.ndarray:
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) ->np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , keep_aspect_ratio : bool = None , ensure_multiple_of : int = None , resample : PILImageResampling = None , do_rescale : bool = None , rescale_factor : float = None , do_normalize : bool = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : ChannelDimension = ChannelDimension.FIRST , **kwargs , ) ->PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
    def post_process_semantic_segmentation( self , outputs , target_sizes = None ):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits ) != len(target_sizes ):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits" )
            if is_torch_tensor(target_sizes ):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits ) ):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(semantic_map )
        else:
            semantic_segmentation = logits.argmax(dim=1 )
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
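# Usage sketch (names illustrative): given `outputs` from a DPT semantic-segmentation
# model and the original image sizes, e.g.
#   maps = processor.post_process_semantic_segmentation(outputs, target_sizes=[(480, 640)])
# the call returns one (480, 640) tensor of per-pixel class indices per input image;
# without target_sizes, the argmax is taken at the model's own logit resolution.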
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__lowercase :Tuple = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
__lowercase :str = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
__lowercase :List[Any] = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
"""simple docstring"""
    def _info( self ) ->MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
    def _compute( self , references : List[List[List[str]]] , predictions : List[List[str]] , min_len : int = 1 , max_len : int = 4 , ) ->Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references , hypotheses=predictions , min_len=min_len , max_len=max_len )
        }
}
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class _a ( SchedulerCommonTest ):
    """simple docstring"""
    scheduler_classes = (UnCLIPScheduler,)
    def get_scheduler_config( self , **kwargs ):
        config = {
            "num_train_timesteps": 10_00,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }
        config.update(**kwargs )
        return config
    def test_timesteps( self ):
        for timesteps in [1, 5, 1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_variance_type( self ):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance )
    def test_clip_sample( self ):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )
    def test_clip_sample_range( self ):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range )
    def test_prediction_type( self ):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_time_indices( self ):
        for time_step in [0, 5_00, 9_99]:
            for prev_timestep in [None, 5, 1_00, 2_50, 5_00, 7_50]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step , prev_timestep=prev_timestep )
    def test_variance_fixed_small_log( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log" )
        scheduler = scheduler_class(**scheduler_config )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.054_9625 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.999_4987 ) ) < 1E-5
    def test_variance_learned_range( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range" )
        scheduler = scheduler_class(**scheduler_config )
        predicted_variance = 0.5
        assert scheduler._get_variance(1 , predicted_variance=predicted_variance ) - -10.171_2790 < 1E-5
        assert scheduler._get_variance(4_87 , predicted_variance=predicted_variance ) - -5.799_8052 < 1E-5
        assert scheduler._get_variance(9_99 , predicted_variance=predicted_variance ) - -0.001_0011 < 1E-5
    def test_full_loop( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for i, t in enumerate(timesteps ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 252.268_2495 ) < 1E-2
        assert abs(result_mean.item() - 0.328_4743 ) < 1E-3
    def test_full_loop_skip_timesteps( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(25 )
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for i, t in enumerate(timesteps ):
            # 1. predict noise residual
            residual = model(sample , t )
            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual , t , sample , prev_timestep=prev_timestep , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 258.204_4983 ) < 1E-2
        assert abs(result_mean.item() - 0.336_2038 ) < 1E-3
    def test_trained_betas( self ):
        pass
    def test_add_noise_device( self ):
        pass
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check ( pkg , hint=None ):
    '''simple docstring'''
    require_version(deps[pkg] , hint )
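# Usage sketch (assuming "tokenizers" is a key in `deps`): dep_version_check("tokenizers")
# raises if the installed version violates the pin from dependency_versions_table, while
# dep_version_check("tokenizers", "pip install -U tokenizers") attaches a custom hint
# to the error message.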
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def git_log ( folder_path : str ):
    '''simple docstring'''
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        "repo_id": str(repo ),
        "repo_sha": str(repo.head.object.hexsha ),
        "repo_branch": str(repo.active_branch ),
    }
    with open(os.path.join(folder_path , "git_log.json" ) , "w" ) as f:
        json.dump(repo_infos , f , indent=4 )
def init_gpu_params ( params ):
    '''simple docstring'''
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return
assert torch.cuda.is_available()
logger.info("Initializing GPUs" )
    if params.n_gpu > 1:
        assert params.local_rank != -1
        params.world_size = int(os.environ["WORLD_SIZE"] )
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"] )
        params.global_rank = int(os.environ["RANK"] )
        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True
        assert params.n_nodes == int(os.environ["N_NODES"] )
        assert params.node_id == int(os.environ["NODE_RANK"] )
    # local job (single GPU)
    else:
        assert params.local_rank == -1
        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False
    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node
    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1
# summary
    PREFIX = f"""--- Global rank: {params.global_rank} - """
logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes )
logger.info(PREFIX + "Node ID : %i" % params.node_id )
logger.info(PREFIX + "Local rank : %i" % params.local_rank )
logger.info(PREFIX + "World size : %i" % params.world_size )
logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node )
logger.info(PREFIX + "Master : %s" % str(params.is_master ) )
logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node ) )
logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu ) )
logger.info(PREFIX + "Hostname : %s" % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info("Initializing PyTorch distributed" )
torch.distributed.init_process_group(
init_method="env://" , backend="nccl" , )
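# For the multi-GPU path above, the launcher is expected to export WORLD_SIZE,
# N_GPU_NODE, RANK, N_NODES and NODE_RANK. A minimal two-process, single-node
# invocation would look roughly like:
#   WORLD_SIZE=2 N_GPU_NODE=2 N_NODES=1 NODE_RANK=0 RANK=0 python train.py --local_rank 0
# (illustrative command line; the actual script name and flags depend on the caller).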
def set_seed ( args ):
    '''simple docstring'''
    np.random.seed(args.seed )
    torch.manual_seed(args.seed )
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed )
from __future__ import annotations
def UpperCAmelCase ( _lowerCamelCase : list[int] , _lowerCamelCase : int ):
'''simple docstring'''
if len(_lowerCamelCase ) < k or k < 0:
raise ValueError("Invalid Input" )
SCREAMING_SNAKE_CASE__ : int = sum(array[:k] )
for i in range(len(_lowerCamelCase ) - k ):
SCREAMING_SNAKE_CASE__ : str = current_sum - array[i] + array[i + k]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max(_lowerCamelCase , _lowerCamelCase )
return max_sum
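# Worked example (illustrative): for array = [1, 4, 2, 10, 2] and k = 2 the sliding
# window sums are 5, 6, 12, 12, so max_sum_in_array(array, k) returns 12; each step
# reuses the previous sum instead of re-adding all k elements.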
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array = [randint(-1_000, 1_000) for i in range(100)]
    k = randint(0, 110)
print(f"The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}")
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class _a ( unittest.TestCase ):
"""simple docstring"""
    def _get_uniform_logits( self , batch_size , length ):
        scores = jnp.ones((batch_size, length) ) / length
        return scores
    def test_temperature_dist_warper( self ):
        input_ids = None
        length = 20
        scores = self._get_uniform_logits(batch_size=2 , length=length )
        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1 )  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4 )  # valley, 1st batch
        # compute softmax
        probs = jax.nn.softmax(scores , axis=-1 )
        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5 )
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3 )
        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids , scores.copy() , cur_len=None ) , axis=-1 )
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids , scores.copy() , cur_len=None ) , axis=-1 )
        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
        self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
        self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
        self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
    def test_top_k_dist_warper( self ):
        input_ids = None
        vocab_size = 10
        batch_size = 2
        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size )[None, :] , (batch_size, vocab_size) ).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size
        top_k_warp = FlaxTopKLogitsWarper(3 )
        scores = top_k_warp(input_ids , ramp_logits , cur_len=None )
        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
        self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
        ramp_logits = np.broadcast_to(np.arange(length )[None, :] , (batch_size, length) ).copy()
        scores = top_k_warp_safety_check(input_ids , ramp_logits , cur_len=None )
        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
    def test_top_p_dist_warper( self ):
        input_ids = None
        vocab_size = 10
        batch_size = 2
        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
        top_p_warp = FlaxTopPLogitsWarper(0.8 )
        filtered_dist = np.exp(top_p_warp(input_ids , dist , cur_len=None ) )
        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
        self.assertTrue(np.allclose(filtered_dist , EXPECTED_FILTERED_DIST , atol=1E-3 ) )
        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size )[None, :] , (batch_size, vocab_size) ).copy() - (
            vocab_size // 2
        )
        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0
        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
        filtered_dist = top_p_warp(input_ids , ramp_logits , cur_len=None )
        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
    def test_min_length_dist_processor( self ):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=eos_token_id )
        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20) , vocab_size=20 )
        cur_len = 5
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores_before_min_length = min_dist_processor(input_ids , scores , cur_len=cur_len )
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("inf" )] )
        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size , vocab_size )
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids , scores , cur_len=cur_len )
        self.assertFalse(jnp.isinf(scores_before_min_length ).any() )
    def test_forced_bos_token_logits_processor( self ):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0
        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id )
        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1) , vocab_size=20 )
        cur_len = 1
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores = logits_processor(input_ids , scores , cur_len=cur_len )
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] )  # score for bos_token_id should be zero
        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores = logits_processor(input_ids , scores , cur_len=cur_len )
        self.assertFalse(jnp.isinf(scores ).any() )
    def test_forced_eos_token_logits_processor( self ):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5
        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length , eos_token_id=eos_token_id )
        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4) , vocab_size=20 )
        cur_len = 4
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores = logits_processor(input_ids , scores , cur_len=cur_len )
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] )  # score for eos_token_id should be zero
        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores = logits_processor(input_ids , scores , cur_len=cur_len )
        self.assertFalse(jnp.isinf(scores ).any() )
    def test_processor_list( self ):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15
        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length) , vocab_size )
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores_comp = scores.copy()
        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5 )
        top_k_warp = FlaxTopKLogitsWarper(3 )
        top_p_warp = FlaxTopPLogitsWarper(0.8 )
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=eos_token_id )
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id )
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length , eos_token_id=eos_token_id )
        cur_len = 10
        # no processor list
        scores = temp_dist_warp(input_ids , scores , cur_len=cur_len )
        scores = top_k_warp(input_ids , scores , cur_len=cur_len )
        scores = top_p_warp(input_ids , scores , cur_len=cur_len )
        scores = min_dist_proc(input_ids , scores , cur_len=cur_len )
        scores = bos_dist_proc(input_ids , scores , cur_len=cur_len )
        scores = eos_dist_proc(input_ids , scores , cur_len=cur_len )
        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
        scores_comp = processor(input_ids , scores_comp , cur_len=cur_len )
        # scores should be equal
        self.assertTrue(jnp.allclose(scores , scores_comp , atol=1E-3 ) )
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
    def test_processor_list_jitted( self ):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15
        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length) , vocab_size )
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores_comp = scores.copy()
        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5 )
        top_k_warp = FlaxTopKLogitsWarper(3 )
        top_p_warp = FlaxTopPLogitsWarper(0.8 )
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=eos_token_id )
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id )
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length , eos_token_id=eos_token_id )
        cur_len = 10
        # no processor list
        def run_no_processor_list(input_ids , scores , cur_len ):
            scores = temp_dist_warp(input_ids , scores , cur_len=cur_len )
            scores = top_k_warp(input_ids , scores , cur_len=cur_len )
            scores = top_p_warp(input_ids , scores , cur_len=cur_len )
            scores = min_dist_proc(input_ids , scores , cur_len=cur_len )
            scores = bos_dist_proc(input_ids , scores , cur_len=cur_len )
            scores = eos_dist_proc(input_ids , scores , cur_len=cur_len )
            return scores
        # with processor list
        def run_processor_list(input_ids , scores , cur_len ):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
            scores = processor(input_ids , scores , cur_len=cur_len )
            return scores
        jitted_run_no_processor_list = jax.jit(run_no_processor_list )
        jitted_run_processor_list = jax.jit(run_processor_list )
        scores = jitted_run_no_processor_list(input_ids , scores , cur_len )
        scores_comp = jitted_run_processor_list(input_ids , scores_comp , cur_len )
        # scores should be equal
        self.assertTrue(jnp.allclose(scores , scores_comp , atol=1E-3 ) )
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
| 711 |
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """
    Find the maximum of ``nums[left:right + 1]`` by divide and conquer.

    >>> find_max([1, 5, 3], 0, 2)
    5
    """
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 26 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_rag": ["RagConfig"],
"retrieval_rag": ["RagRetriever"],
"tokenization_rag": ["RagTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
"RagModel",
"RagPreTrainedModel",
"RagSequenceForGeneration",
"RagTokenForGeneration",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
"TFRagModel",
"TFRagPreTrainedModel",
"TFRagSequenceForGeneration",
"TFRagTokenForGeneration",
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 712 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
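# A generated completion is treated as finished once any of these markers opens a new top-level block.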
class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset, yielding each prompt `n_copies` times."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generates commented code ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` which checks if all generated functions in the batch are completed."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Returns True if every generated continuation contains one of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
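# Wired into generation below as: StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]).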
def remove_last_block(string):
    """Remove the last, possibly incomplete block that starts with one of EOF_STRINGS."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple completions for each task in the dataset, distributed across processes with accelerate."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    generations = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs
    )

    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=generations, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)
# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 26 | 0 |
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)
class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"
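# Each concrete backend below only wires up its availability check, run_hp_search_* entry point and default search space.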
class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)
ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
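# Note (assumed behavior): Trainer.hyperparameter_search() uses this helper to pick a backend when none is passed.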
| 713 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_upernet"] = [
"UperNetForSemanticSegmentation",
"UperNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 | 0 |
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL,
}
_default_log_level = logging.WARNING
def _get_default_logging_level() -> int:
    """
    If the DATASETS_VERBOSITY env var is set to one of the valid choices, return that as the new default level.
    Otherwise, return `_default_log_level`.
    """
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }"
            )
    return _default_log_level
def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    # Apply the default verbosity to the library root logger.
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name, defaulting to the library root logger."""
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    """Return the current verbosity level of the library root logger."""
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    """Set the verbosity level of the library root logger."""
    _get_library_root_logger().setLevel(verbosity)
def set_verbosity_info():
    """Set the verbosity to `INFO`."""
    return set_verbosity(INFO)


def set_verbosity_warning():
    """Set the verbosity to `WARNING`."""
    return set_verbosity(WARNING)


def set_verbosity_debug():
    """Set the verbosity to `DEBUG`."""
    return set_verbosity(DEBUG)


def set_verbosity_error():
    """Set the verbosity to `ERROR`."""
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    """Disable propagation of the library log outputs. Note that log propagation is disabled by default."""
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    """Enable propagation of the library log outputs."""
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return
_tqdm_active = True
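# Module-level switch read by `tqdm` below; flip it with enable_progress_bar()/disable_progress_bar().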
class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    """Return a boolean indicating whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    """Enable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    """Disable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = False
| 714 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
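# The returned tuple feeds main() below; label2id maps string labels to the integer ids used in the model config.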
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")
            results.update(result)

    return results
if __name__ == "__main__":
main()
| 26 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
"FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
"FalconForCausalLM",
"FalconModel",
"FalconPreTrainedModel",
"FalconForSequenceClassification",
"FalconForTokenClassification",
"FalconForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 715 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class _a(BaseImageProcessor):
    """Image processor: shortest-edge resize, center crop, rescale and ImageNet-standard normalization."""

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        # Resize the shorter edge to `size["shortest_edge"]`, keeping the aspect ratio.
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 26 | 0 |
'''simple docstring'''
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings
def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
# optional parameters
parser.add_argument(
"--targets",
default="DeprecationWarning,UserWarning,FutureWarning",
type=list_str,
help="Comma-separated list of target warning(s) which we want to extract.",
)
parser.add_argument(
"--from_gh",
action="store_true",
help="If running from a GitHub action workflow and collecting warnings from its artifacts.",
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("=" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 716 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _a ( unittest.TestCase ):
"""simple docstring"""
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)
        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)
        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 26 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_upernet"] = [
"UperNetForSemanticSegmentation",
"UperNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 717 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_configure(config):
config.addinivalue_line(
"markers" , "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested" )
config.addinivalue_line(
"markers" , "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested" )
config.addinivalue_line("markers" , "is_pipeline_test: mark test to run only when pipelines are tested" )
config.addinivalue_line("markers" , "is_staging_test: mark test to run only in the staging environment" )
config.addinivalue_line("markers" , "accelerate_tests: mark test that require accelerate" )
config.addinivalue_line("markers" , "tool_tests: mark the tool tests that are run on their specific schedule" )
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5; make sure the program still reports success.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker
class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)
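# Usage: append `# doctest: +IGNORE_RESULT` to a doctest line to accept any output for that statement.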
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
| 26 | 0 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    """Spherical linear interpolation between two (torch or numpy) vectors."""
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # vectors are nearly colinear: fall back to plain linear interpolation
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2
def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
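# Equals theta**2 / 2, where theta is the angle between the normalized embeddings.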
def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class _a(DiffusionPipeline):
    """CLIP-guided stable diffusion pipeline (img2img variant)."""
    def __init__(self, vae, text_encoder, clip_model, tokenizer, unet, scheduler, feature_extractor, coca_model=None, coca_tokenizer=None, coca_transform=None):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            coca_model=coca_model,
            coca_tokenizer=coca_tokenizer,
            coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)
    def enable_attention_slicing(self, slice_size="auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
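    # strength near 1.0 runs almost the full denoising schedule; near 0.0 it mostly preserves the init image.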
def A_ ( self : List[str] , a : Dict , a : List[Any] , a : List[str] , a : int , a : List[Any] , a : Dict=None ) ->Any:
if not isinstance(a , torch.Tensor ):
raise ValueError(f"""`image` has to be of type `torch.Tensor` but is {type(a )}""" )
SCREAMING_SNAKE_CASE__ : int = image.to(device=a , dtype=a )
if isinstance(a , a ):
SCREAMING_SNAKE_CASE__ : Optional[int] = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(a )
]
SCREAMING_SNAKE_CASE__ : List[str] = torch.cat(a , dim=0 )
else:
SCREAMING_SNAKE_CASE__ : List[str] = self.vae.encode(a ).latent_dist.sample(a )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
SCREAMING_SNAKE_CASE__ : str = 0.1_8215 * init_latents
SCREAMING_SNAKE_CASE__ : Union[str, Any] = init_latents.repeat_interleave(a , dim=0 )
SCREAMING_SNAKE_CASE__ : List[str] = randn_tensor(init_latents.shape , generator=a , device=a , dtype=a )
# get latents
SCREAMING_SNAKE_CASE__ : Tuple = self.scheduler.add_noise(a , a , a )
SCREAMING_SNAKE_CASE__ : List[Any] = init_latents
return latents
def A_ ( self : Any , a : int ) ->Any:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.coca_transform(a ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
SCREAMING_SNAKE_CASE__ : List[str] = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
SCREAMING_SNAKE_CASE__ : str = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split("<end_of_text>" )[0].replace("<start_of_text>" , "" ).rstrip(" .," )
def A_ ( self : str , a : Optional[int] , a : List[Any] ) ->List[str]:
SCREAMING_SNAKE_CASE__ : str = self.feature_extractor.preprocess(a )
SCREAMING_SNAKE_CASE__ : List[str] = torch.from_numpy(clip_image_input["pixel_values"][0] ).unsqueeze(0 ).to(self.device ).half()
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.clip_model.get_image_features(a )
SCREAMING_SNAKE_CASE__ : Optional[int] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=a )
SCREAMING_SNAKE_CASE__ : str = image_embeddings_clip.repeat_interleave(a , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def A_ ( self : Optional[Any] , a : Optional[int] , a : Optional[int] , a : str , a : Dict , a : Dict , a : List[Any] , a : Union[str, Any] , ) ->Tuple:
SCREAMING_SNAKE_CASE__ : List[Any] = latents.detach().requires_grad_()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.scheduler.scale_model_input(a , a )
# predict the noise residual
SCREAMING_SNAKE_CASE__ : Optional[int] = self.unet(a , a , encoder_hidden_states=a ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
SCREAMING_SNAKE_CASE__ : Dict = self.scheduler.alphas_cumprod[timestep]
SCREAMING_SNAKE_CASE__ : Optional[Any] = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
SCREAMING_SNAKE_CASE__ : str = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.sqrt(a )
SCREAMING_SNAKE_CASE__ : int = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , a ):
SCREAMING_SNAKE_CASE__ : Dict = self.scheduler.sigmas[index]
SCREAMING_SNAKE_CASE__ : Optional[int] = latents - sigma * noise_pred
else:
raise ValueError(f"""scheduler type {type(self.scheduler )} not supported""" )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
SCREAMING_SNAKE_CASE__ : List[Any] = 1 / 0.1_8215 * sample
SCREAMING_SNAKE_CASE__ : Tuple = self.vae.decode(a ).sample
SCREAMING_SNAKE_CASE__ : Optional[int] = (image / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE__ : int = transforms.Resize(self.feature_extractor_size )(a )
SCREAMING_SNAKE_CASE__ : Tuple = self.normalize(a ).to(latents.dtype )
SCREAMING_SNAKE_CASE__ : int = self.clip_model.get_image_features(a )
SCREAMING_SNAKE_CASE__ : Dict = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = spherical_dist_loss(a , a ).mean() * clip_guidance_scale
SCREAMING_SNAKE_CASE__ : Tuple = -torch.autograd.grad(a , a )[0]
if isinstance(self.scheduler , a ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = latents.detach() + grads * (sigma**2)
SCREAMING_SNAKE_CASE__ : Dict = noise_pred_original
else:
SCREAMING_SNAKE_CASE__ : List[Any] = noise_pred_original - torch.sqrt(a ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self : Optional[int] , a : str , a : Optional[int] , a : Dict = None , a : Union[str, Any] = None , a : Dict = 5_12 , a : Tuple = 5_12 , a : Dict = 0.6 , a : List[str] = 50 , a : Tuple = 7.5 , a : Dict = 1 , a : int = 0.0 , a : Optional[Any] = 1_00 , a : Dict = None , a : Any = "pil" , a : str = True , a : Dict = 0.8 , a : Any = 0.1 , a : Union[str, Any] = 0.1 , ) ->Optional[Any]:
if isinstance(a , a ) and len(a ) != batch_size:
raise ValueError(f"""You have passed {batch_size} batch_size, but only {len(a )} generators.""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if isinstance(a , torch.Generator ) and batch_size > 1:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [generator] + [None] * (batch_size - 1)
SCREAMING_SNAKE_CASE__ : Optional[Any] = [
('''model''', self.coca_model is None),
('''tokenizer''', self.coca_tokenizer is None),
('''transform''', self.coca_transform is None),
]
SCREAMING_SNAKE_CASE__ : List[Any] = [x[0] for x in coca_is_none if x[1]]
SCREAMING_SNAKE_CASE__ : Dict = ''', '''.join(a )
# generate prompts with the CoCa model if prompt is None
if content_prompt is None:
if len(a ):
raise ValueError(
f"""Content prompt is None and CoCa [{coca_is_none_str}] is None."""
f"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_image_description(a )
if style_prompt is None:
if len(a ):
raise ValueError(
f"""Style prompt is None and CoCa [{coca_is_none_str}] is None."""
f""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_image_description(a )
# get prompt text embeddings for content and style
SCREAMING_SNAKE_CASE__ : Any = self.tokenizer(
a , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=a , return_tensors="pt" , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
SCREAMING_SNAKE_CASE__ : str = self.tokenizer(
a , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=a , return_tensors="pt" , )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
SCREAMING_SNAKE_CASE__ : List[Any] = slerp(a , a , a )
# duplicate text embeddings for each generation per prompt
SCREAMING_SNAKE_CASE__ : Optional[Any] = text_embeddings.repeat_interleave(a , dim=0 )
# set timesteps
SCREAMING_SNAKE_CASE__ : Optional[Any] = '''offset''' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
SCREAMING_SNAKE_CASE__ : Tuple = {}
if accepts_offset:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 1
self.scheduler.set_timesteps(a , **a )
# Some schedulers like PNDM have timesteps as arrays
# It's more efficient to move all timesteps to the correct device beforehand
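# note: Tensor.to() returns a new tensor, so the call on the next line does
# not move self.scheduler.timesteps in place; the scheduler's own timesteps
# attribute is what get_timesteps reads below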
self.scheduler.timesteps.to(self.device )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_timesteps(a , a , self.device )
SCREAMING_SNAKE_CASE__ : Tuple = timesteps[:1].repeat(a )
# Preprocess image
SCREAMING_SNAKE_CASE__ : Dict = preprocess(a , a , a )
SCREAMING_SNAKE_CASE__ : List[Any] = self.prepare_latents(
a , a , a , text_embeddings.dtype , self.device , a )
SCREAMING_SNAKE_CASE__ : Dict = preprocess(a , a , a )
SCREAMING_SNAKE_CASE__ : int = self.prepare_latents(
a , a , a , text_embeddings.dtype , self.device , a )
SCREAMING_SNAKE_CASE__ : str = slerp(a , a , a )
if clip_guidance_scale > 0:
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_clip_image_embeddings(a , a )
SCREAMING_SNAKE_CASE__ : Any = self.get_clip_image_embeddings(a , a )
SCREAMING_SNAKE_CASE__ : Dict = slerp(
a , a , a )
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
SCREAMING_SNAKE_CASE__ : Any = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE__ : Optional[Any] = content_text_input.input_ids.shape[-1]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer([""] , padding="max_length" , max_length=a , return_tensors="pt" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
SCREAMING_SNAKE_CASE__ : int = uncond_embeddings.repeat_interleave(a , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
SCREAMING_SNAKE_CASE__ : List[str] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However, this currently doesn't work on `mps`.
SCREAMING_SNAKE_CASE__ : Optional[int] = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
SCREAMING_SNAKE_CASE__ : str = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
SCREAMING_SNAKE_CASE__ : Dict = torch.randn(a , generator=a , device="cpu" , dtype=a ).to(
self.device )
else:
SCREAMING_SNAKE_CASE__ : str = torch.randn(a , generator=a , device=self.device , dtype=a )
else:
if latents.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
SCREAMING_SNAKE_CASE__ : str = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
SCREAMING_SNAKE_CASE__ : Any = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should lie in [0, 1]
SCREAMING_SNAKE_CASE__ : Any = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
SCREAMING_SNAKE_CASE__ : Optional[Any] = {}
if accepts_eta:
SCREAMING_SNAKE_CASE__ : List[Any] = eta
# check if the scheduler accepts generator
SCREAMING_SNAKE_CASE__ : Optional[Any] = '''generator''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
SCREAMING_SNAKE_CASE__ : Tuple = generator
with self.progress_bar(total=a ):
for i, t in enumerate(a ):
# expand the latents if we are doing classifier free guidance
SCREAMING_SNAKE_CASE__ : str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
SCREAMING_SNAKE_CASE__ : List[str] = self.scheduler.scale_model_input(a , a )
# predict the noise residual
SCREAMING_SNAKE_CASE__ : Any = self.unet(a , a , encoder_hidden_states=a ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE__ : Any = noise_pred.chunk(2 )
SCREAMING_SNAKE_CASE__ : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform CLIP guidance
if clip_guidance_scale > 0:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
SCREAMING_SNAKE_CASE__ : int = self.cond_fn(
a , a , a , a , a , a , a , )
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE__ : int = self.scheduler.step(a , a , a , **a ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
SCREAMING_SNAKE_CASE__ : List[Any] = 1 / 0.1_8215 * latents
SCREAMING_SNAKE_CASE__ : Tuple = self.vae.decode(a ).sample
SCREAMING_SNAKE_CASE__ : Tuple = (image / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE__ : int = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE__ : Tuple = self.numpy_to_pil(a )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=a , nsfw_content_detected=a )
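# ---------------------------------------------------------------------------
# The pipeline above relies on two helpers defined elsewhere in this file,
# `spherical_dist_loss` (used in `cond_fn`) and `slerp` (used in `__call__`).
# Below is a minimal sketch of both, assuming the standard formulations used
# by CLIP-guided diffusion pipelines; the `_sketch` names are hypothetical
# stand-ins, not the definitions from the source.
# ---------------------------------------------------------------------------
import torch
import torch.nn.functional as F
def spherical_dist_loss_sketch(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    # squared geodesic distance between L2-normalized embeddings on the unit sphere
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def slerp_sketch(t: float, v0: torch.Tensor, v1: torch.Tensor) -> torch.Tensor:
    # spherical linear interpolation; falls back to a plain lerp when the
    # inputs are nearly parallel and acos would be numerically unstable
    dot = torch.sum(v0 * v1) / (v0.norm() * v1.norm())
    if torch.abs(dot) > 0.9995:
        return (1 - t) * v0 + t * v1
    theta = torch.acos(dot)
    return (torch.sin((1 - t) * theta) * v0 + torch.sin(t * theta) * v1) / torch.sin(theta)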
| 718 |
def UpperCAmelCase ( _lowerCamelCase : int = 1_000 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = -1
SCREAMING_SNAKE_CASE__ : str = 0
for a in range(1 , n // 3 ):
# Solving the two equations a**2 + b**2 = c**2 and a + b + c = n, eliminating c
SCREAMING_SNAKE_CASE__ : Tuple = (n * n - 2 * a * n) // (2 * n - 2 * a)
SCREAMING_SNAKE_CASE__ : Dict = n - a - b
if c * c == (a * a + b * b):
SCREAMING_SNAKE_CASE__ : str = a * b * c
if candidate >= product:
SCREAMING_SNAKE_CASE__ : List[str] = candidate
return product
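# Derivation sketch for the formula above (standard algebra, stated here for
# clarity): substituting c = n - a - b into a**2 + b**2 = c**2 gives
#   a**2 + b**2 = (n - a - b)**2  =>  b = (n**2 - 2*a*n) / (2*n - 2*a)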
if __name__ == "__main__":
print(f"{solution() = }")
| 26 | 0 |
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _a ( lowercase_ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = TransfoXLTokenizer
snake_case_ = False
snake_case_ = False
def A_ ( self : str ) ->Optional[Any]:
super().setUp()
SCREAMING_SNAKE_CASE__ : Optional[int] = [
"<unk>",
"[CLS]",
"[SEP]",
"want",
"unwanted",
"wa",
"un",
"running",
",",
"low",
"l",
]
SCREAMING_SNAKE_CASE__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def A_ ( self : Dict , **a : List[Any] ) ->List[Any]:
SCREAMING_SNAKE_CASE__ : Tuple = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **a )
def A_ ( self : Optional[Any] , a : int ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : List[str] = "<unk> UNwanted , running"
SCREAMING_SNAKE_CASE__ : int = "<unk> unwanted, running"
return input_text, output_text
def A_ ( self : Any ) ->int:
SCREAMING_SNAKE_CASE__ : Optional[Any] = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=a )
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.tokenize("<unk> UNwanted , running" )
self.assertListEqual(a , ["<unk>", "unwanted", ",", "running"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , [0, 4, 8, 7] )
def A_ ( self : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : int = TransfoXLTokenizer(lower_case=a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? " ) , ["hello", "!", "how", "are", "you", "?"] )
def A_ ( self : int ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = TransfoXLTokenizer(lower_case=a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def A_ ( self : int ) ->int:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = TransfoXLTokenizer(lower_case=a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?"
SCREAMING_SNAKE_CASE__ : List[Any] = [
"Hello",
"(",
"bracket",
")",
"and",
"side",
"@-@",
"scrolled",
"[",
"and",
"]",
"Henry",
"\'s",
"$",
"5",
"@,@",
"000",
"with",
"3",
"@.@",
"34",
"m",
".",
"What",
"\'s",
"up",
"!",
"?",
]
self.assertListEqual(tokenizer.tokenize(a ) , a )
self.assertEqual(tokenizer.convert_tokens_to_string(a ) , a )
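# The "@-@", "@,@" and "@.@" tokens above are WikiText detokenization
# markers: Transfo-XL was trained on WikiText-103, which splits hyphens,
# thousands separators and decimal points into these placeholder tokens so
# that convert_tokens_to_string can restore the original surface form.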
def A_ ( self : Optional[Any] ) ->str:
SCREAMING_SNAKE_CASE__ : Dict = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Dict = len(a )
tokenizer.add_tokens(["new1", "new2"] )
tokenizer.move_added_token("new1" , 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(a ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode("new1" ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , "new1" )
| 719 |
from __future__ import annotations
def UpperCAmelCase ( _lowerCamelCase : list , _lowerCamelCase : int | None = None , _lowerCamelCase : int | None = None ):
'''simple docstring'''
if start is None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0
if end is None:
SCREAMING_SNAKE_CASE__ : Any = len(_lowerCamelCase ) - 1
if start >= end:
return
SCREAMING_SNAKE_CASE__ : List[str] = (start + end) // 2
slowsort(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
slowsort(_lowerCamelCase , mid + 1 , _lowerCamelCase )
if sequence[end] < sequence[mid]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = sequence[mid], sequence[end]
slowsort(_lowerCamelCase , _lowerCamelCase , end - 1 )
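# Slowsort is a deliberately inefficient "multiply and surrender" algorithm:
# it recursively sorts both halves, swaps the maximum to the end, and then
# sorts everything except that maximum again. Hypothetical usage, assuming
# the public name `slowsort` used by the recursive calls above:
#   seq = [9, 3, 7, 1]
#   slowsort(seq)
#   assert seq == [1, 3, 7, 9]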
if __name__ == "__main__":
from doctest import testmod
testmod()
| 26 | 0 |
from __future__ import annotations
def UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
if len(lowerCamelCase__ ) == 0:
return False
SCREAMING_SNAKE_CASE__ : int = len(lowerCamelCase__ ) // 2
if a_list[midpoint] == item:
return True
if item < a_list[midpoint]:
return binary_search(a_list[:midpoint] , lowerCamelCase__ )
else:
return binary_search(a_list[midpoint + 1 :] , lowerCamelCase__ )
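# Note: the slices above copy their halves, so each call is O(n) in time and
# memory even though only O(log n) comparisons are made; an index-based
# variant avoids the copies. Hypothetical usage, assuming the name used by
# the recursive calls above:
#   binary_search([1, 3, 5, 7], 5)  # -> True
#   binary_search([1, 3, 5, 7], 4)  # -> False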
if __name__ == "__main__":
__lowercase :int = input("Enter numbers separated by comma:\n").strip()
__lowercase :Optional[int] = [int(item.strip()) for item in user_input.split(",")]
__lowercase :int = int(input("Enter the number to be found in the list:\n").strip())
__lowercase :Tuple = "" if binary_search(sequence, target) else "not "
print(f"{target} was {not_str}found in {sequence}")
| 720 |
from __future__ import annotations
from fractions import Fraction
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : int ):
'''simple docstring'''
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
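# Worked example (Project Euler 33): 49/98 is "digit cancelling" because
# striking the shared digit 9 leaves 4/8, and 49/98 == 4/8 == 1/2.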
def UpperCAmelCase ( _lowerCamelCase : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = []
SCREAMING_SNAKE_CASE__ : str = 11
SCREAMING_SNAKE_CASE__ : Any = int("1" + "0" * digit_len )
for num in range(_lowerCamelCase , _lowerCamelCase ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(_lowerCamelCase , _lowerCamelCase ):
solutions.append(f"""{num}/{den}""" )
den += 1
num += 1
SCREAMING_SNAKE_CASE__ : str = 10
return solutions
def UpperCAmelCase ( _lowerCamelCase : int = 2 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = 1.0
for fraction in fraction_list(_lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Any = Fraction(_lowerCamelCase )
result *= frac.denominator / frac.numerator
return int(_lowerCamelCase )
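# The four non-trivial two-digit fractions are 16/64, 19/95, 26/65 and 49/98;
# their product is 1/100 in lowest terms, so the den/num product accumulated
# above evaluates to 100.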
if __name__ == "__main__":
print(solution())
| 26 | 0 |
'''simple docstring'''
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def UpperCAmelCase ( _lowerCamelCase : List[str] ):
'''simple docstring'''
if is_torch_version("<" , "2.0.0" ) or not hasattr(_lowerCamelCase , "_dynamo" ):
return False
return isinstance(_lowerCamelCase , torch._dynamo.eval_frame.OptimizedModule )
def UpperCAmelCase ( _lowerCamelCase : Any , _lowerCamelCase : bool = True ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
SCREAMING_SNAKE_CASE__ : Optional[Any] = is_compiled_module(_lowerCamelCase )
if is_compiled:
SCREAMING_SNAKE_CASE__ : Optional[Any] = model
SCREAMING_SNAKE_CASE__ : List[str] = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : List[Any] = model.module
if not keep_fpaa_wrapper:
SCREAMING_SNAKE_CASE__ : Any = getattr(_lowerCamelCase , "forward" )
SCREAMING_SNAKE_CASE__ : Any = model.__dict__.pop("_original_forward" , _lowerCamelCase )
if original_forward is not None:
while hasattr(_lowerCamelCase , "__wrapped__" ):
SCREAMING_SNAKE_CASE__ : List[Any] = forward.__wrapped__
if forward == original_forward:
break
SCREAMING_SNAKE_CASE__ : Optional[int] = forward
if getattr(_lowerCamelCase , "_converted_to_transformer_engine" , _lowerCamelCase ):
convert_model(_lowerCamelCase , to_transformer_engine=_lowerCamelCase )
if is_compiled:
SCREAMING_SNAKE_CASE__ : Optional[int] = model
SCREAMING_SNAKE_CASE__ : List[str] = compiled_model
return model
def UpperCAmelCase ( ):
'''simple docstring'''
PartialState().wait_for_everyone()
def UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : Optional[int] ):
'''simple docstring'''
if PartialState().distributed_type == DistributedType.TPU:
xm.save(_lowerCamelCase , _lowerCamelCase )
elif PartialState().local_process_index == 0:
torch.save(_lowerCamelCase , _lowerCamelCase )
@contextmanager
def UpperCAmelCase ( **_lowerCamelCase : Optional[Any] ):
'''simple docstring'''
for key, value in kwargs.items():
SCREAMING_SNAKE_CASE__ : List[Any] = str(_lowerCamelCase )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def UpperCAmelCase ( _lowerCamelCase : Tuple ):
'''simple docstring'''
if not hasattr(_lowerCamelCase , "__qualname__" ) and not hasattr(_lowerCamelCase , "__name__" ):
SCREAMING_SNAKE_CASE__ : Any = getattr(_lowerCamelCase , "__class__" , _lowerCamelCase )
if hasattr(_lowerCamelCase , "__qualname__" ):
return obj.__qualname__
if hasattr(_lowerCamelCase , "__name__" ):
return obj.__name__
return str(_lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : List[str] ):
'''simple docstring'''
for key, value in source.items():
if isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Tuple = destination.setdefault(_lowerCamelCase , {} )
merge_dicts(_lowerCamelCase , _lowerCamelCase )
else:
SCREAMING_SNAKE_CASE__ : int = value
return destination
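# Worked example: merge_dicts({"a": {"b": 1}}, {"a": {"c": 2}}) updates the
# destination in place and returns {"a": {"c": 2, "b": 1}}; nested dicts are
# merged recursively, while scalar values from the source overwrite.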
def UpperCAmelCase ( _lowerCamelCase : int = None ):
'''simple docstring'''
if port is None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 29_500
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(("localhost", port) ) == 0
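# connect_ex returns 0 only when something is already listening on the port;
# 29_500 is the conventional default rendezvous port of torch.distributed
# launchers, hence the fallback above.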
| 721 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
@require_torch
def A_ ( self : Dict ) ->str:
SCREAMING_SNAKE_CASE__ : Any = pipeline(
task="zero-shot-audio-classification" , model="hf-internal-testing/tiny-clap-htsat-unfused" )
SCREAMING_SNAKE_CASE__ : Optional[int] = load_dataset("ashraq/esc50" )
SCREAMING_SNAKE_CASE__ : Optional[int] = dataset["train"]["audio"][-1]["array"]
SCREAMING_SNAKE_CASE__ : int = audio_classifier(a , candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"] )
self.assertEqual(
nested_simplify(a ) , [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vacuum cleaner"}] , )
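# hf-internal-testing checkpoints are randomly initialized, so the tiny CLAP
# model above yields near-uniform scores (0.501 vs 0.499) rather than a
# meaningful classification; the slow test below uses the real checkpoint.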
@unittest.skip("No models are available in TF" )
def A_ ( self : int ) ->Union[str, Any]:
pass
@slow
@require_torch
def A_ ( self : int ) ->str:
SCREAMING_SNAKE_CASE__ : List[str] = pipeline(
task="zero-shot-audio-classification" , model="laion/clap-htsat-unfused" , )
# This is an audio recording of a dog
SCREAMING_SNAKE_CASE__ : int = load_dataset("ashraq/esc50" )
SCREAMING_SNAKE_CASE__ : str = dataset["train"]["audio"][-1]["array"]
SCREAMING_SNAKE_CASE__ : List[Any] = audio_classifier(a , candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"] )
self.assertEqual(
nested_simplify(a ) , [
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
] , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = audio_classifier([audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"] )
self.assertEqual(
nested_simplify(a ) , [
[
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
],
]
* 5 , )
SCREAMING_SNAKE_CASE__ : int = audio_classifier(
[audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"] , batch_size=5 )
self.assertEqual(
nested_simplify(a ) , [
[
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
],
]
* 5 , )
@unittest.skip("No models are available in TF" )
def A_ ( self : Optional[int] ) ->Union[str, Any]:
pass
| 26 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class _a ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self : Tuple ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : int = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ : str = BlipImageProcessor()
SCREAMING_SNAKE_CASE__ : int = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
SCREAMING_SNAKE_CASE__ : Dict = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert" )
SCREAMING_SNAKE_CASE__ : str = InstructBlipProcessor(a , a , a )
processor.save_pretrained(self.tmpdirname )
def A_ ( self : int , **a : Optional[Any] ) ->str:
return AutoProcessor.from_pretrained(self.tmpdirname , **a ).tokenizer
def A_ ( self : Tuple , **a : str ) ->Any:
return AutoProcessor.from_pretrained(self.tmpdirname , **a ).image_processor
def A_ ( self : Tuple , **a : int ) ->Optional[int]:
return AutoProcessor.from_pretrained(self.tmpdirname , **a ).qformer_tokenizer
def A_ ( self : Tuple ) ->Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def A_ ( self : Optional[int] ) ->List[str]:
SCREAMING_SNAKE_CASE__ : Optional[int] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE__ : Optional[int] = [Image.fromarray(np.moveaxis(a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def A_ ( self : List[str] ) ->Tuple:
SCREAMING_SNAKE_CASE__ : List[Any] = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : List[str] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_image_processor(do_normalize=a , padding_value=1.0 )
SCREAMING_SNAKE_CASE__ : Dict = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , a )
self.assertIsInstance(processor.qformer_tokenizer , a )
def A_ ( self : Optional[Any] ) ->int:
SCREAMING_SNAKE_CASE__ : Any = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : List[str] = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_qformer_tokenizer()
SCREAMING_SNAKE_CASE__ : Any = InstructBlipProcessor(
tokenizer=a , image_processor=a , qformer_tokenizer=a )
SCREAMING_SNAKE_CASE__ : Dict = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ : List[Any] = image_processor(a , return_tensors="np" )
SCREAMING_SNAKE_CASE__ : Tuple = processor(images=a , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def A_ ( self : Tuple ) ->Dict:
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : int = self.get_qformer_tokenizer()
SCREAMING_SNAKE_CASE__ : List[Any] = InstructBlipProcessor(
tokenizer=a , image_processor=a , qformer_tokenizer=a )
SCREAMING_SNAKE_CASE__ : Dict = "lower newer"
SCREAMING_SNAKE_CASE__ : Any = processor(text=a )
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer(a , return_token_type_ids=a )
SCREAMING_SNAKE_CASE__ : str = qformer_tokenizer(a , return_token_type_ids=a )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["qformer_" + key] )
def A_ ( self : List[Any] ) ->int:
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_qformer_tokenizer()
SCREAMING_SNAKE_CASE__ : List[Any] = InstructBlipProcessor(
tokenizer=a , image_processor=a , qformer_tokenizer=a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "lower newer"
SCREAMING_SNAKE_CASE__ : Optional[int] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = processor(text=a , images=a )
self.assertListEqual(
list(inputs.keys() ) , ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] , )
# test if it raises when no input is passed
with pytest.raises(a ):
processor()
def A_ ( self : Dict ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Tuple = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : List[str] = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_qformer_tokenizer()
SCREAMING_SNAKE_CASE__ : Optional[Any] = InstructBlipProcessor(
tokenizer=a , image_processor=a , qformer_tokenizer=a )
SCREAMING_SNAKE_CASE__ : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE__ : int = processor.batch_decode(a )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.batch_decode(a )
self.assertListEqual(a , a )
def A_ ( self : Any ) ->List[Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : str = self.get_qformer_tokenizer()
SCREAMING_SNAKE_CASE__ : List[str] = InstructBlipProcessor(
tokenizer=a , image_processor=a , qformer_tokenizer=a )
SCREAMING_SNAKE_CASE__ : Dict = "lower newer"
SCREAMING_SNAKE_CASE__ : List[str] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ : Any = processor(text=a , images=a )
self.assertListEqual(
list(inputs.keys() ) , ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] , )
| 700 |
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
__lowercase :List[str] = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
__lowercase :str = get_tests_dir("fixtures/vocab.json")
__lowercase :Optional[int] = get_tests_dir("fixtures")
class _a ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def A_ ( self : Optional[Any] ) ->int:
SCREAMING_SNAKE_CASE__ : Dict = 0
def A_ ( self : Any ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
self.assertIsInstance(a , a )
def A_ ( self : Union[str, Any] ) ->List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Dict = WavaVecaConfig()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
# save in new folder
model_config.save_pretrained(a )
processor.save_pretrained(a )
SCREAMING_SNAKE_CASE__ : str = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : int ) ->List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(a , os.path.join(a , a ) )
copyfile(a , os.path.join(a , "vocab.json" ) )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : List[Any] ) ->Tuple:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Optional[Any] = WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE__ : Tuple = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
SCREAMING_SNAKE_CASE__ : Any = WavaVecaProcessor(a , a )
# save in new folder
processor.save_pretrained(a )
# drop `processor_class` in tokenizer
with open(os.path.join(a , a ) , "r" ) as f:
SCREAMING_SNAKE_CASE__ : Optional[int] = json.load(a )
config_dict.pop("processor_class" )
with open(os.path.join(a , a ) , "w" ) as f:
f.write(json.dumps(a ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : List[str] ) ->Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Tuple = WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
SCREAMING_SNAKE_CASE__ : Optional[int] = WavaVecaProcessor(a , a )
# save in new folder
processor.save_pretrained(a )
# drop `processor_class` in feature extractor
with open(os.path.join(a , a ) , "r" ) as f:
SCREAMING_SNAKE_CASE__ : List[Any] = json.load(a )
config_dict.pop("processor_class" )
with open(os.path.join(a , a ) , "w" ) as f:
f.write(json.dumps(a ) )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : Union[str, Any] ) ->str:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : List[Any] = WavaVecaConfig(processor_class="Wav2Vec2Processor" )
model_config.save_pretrained(a )
# copy relevant files
copyfile(a , os.path.join(a , "vocab.json" ) )
# create empty sample processor
with open(os.path.join(a , a ) , "w" ) as f:
f.write("{}" )
SCREAMING_SNAKE_CASE__ : Tuple = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : Optional[Any] ) ->Optional[int]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(a ):
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(a ):
SCREAMING_SNAKE_CASE__ : Any = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
SCREAMING_SNAKE_CASE__ : Dict = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
SCREAMING_SNAKE_CASE__ : int = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a , use_fast=a )
SCREAMING_SNAKE_CASE__ : List[Any] = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , "NewTokenizer" )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
def A_ ( self : Tuple ) ->List[Any]:
try:
AutoConfig.register("custom" , a )
AutoFeatureExtractor.register(a , a )
AutoTokenizer.register(a , slow_tokenizer_class=a )
AutoProcessor.register(a , a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(a ):
AutoProcessor.register(a , a )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE__ : List[str] = CustomFeatureExtractor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ : int = os.path.join(a , "vocab.txt" )
with open(a , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = CustomTokenizer(a )
SCREAMING_SNAKE_CASE__ : List[Any] = CustomProcessor(a , a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(a )
SCREAMING_SNAKE_CASE__ : Any = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def A_ ( self : Union[str, Any] ) ->int:
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = False
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = False
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "AutoFeatureExtractor"
snake_case_ = "AutoTokenizer"
snake_case_ = False
try:
AutoConfig.register("custom" , a )
AutoFeatureExtractor.register(a , a )
AutoTokenizer.register(a , slow_tokenizer_class=a )
AutoProcessor.register(a , a )
# If remote code is not set, the default is to use local classes.
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
SCREAMING_SNAKE_CASE__ : Tuple = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is enabled, we load from the Hub.
SCREAMING_SNAKE_CASE__ : Any = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def A_ ( self : Optional[Any] ) ->Dict:
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(processor.__class__.__name__ , "BertTokenizerFast" )
def A_ ( self : Dict ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Dict = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext" )
self.assertEqual(processor.__class__.__name__ , "ConvNextImageProcessor" )
@is_staging_test
class _a ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def A_ ( cls : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : int = TOKEN
HfFolder.save_token(a )
@classmethod
def A_ ( cls : List[str] ) ->Optional[int]:
try:
delete_repo(token=cls._token , repo_id="test-processor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-processor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-processor" )
except HTTPError:
pass
def A_ ( self : Dict ) ->Dict:
SCREAMING_SNAKE_CASE__ : Tuple = WavaVecaProcessor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(a , "test-processor" ) , push_to_hub=a , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ : Optional[int] = WavaVecaProcessor.from_pretrained(f"""{USER}/test-processor""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(a , getattr(new_processor.feature_extractor , a ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def A_ ( self : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = WavaVecaProcessor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(a , "test-processor-org" ) , push_to_hub=a , use_auth_token=self._token , organization="valid_org" , )
SCREAMING_SNAKE_CASE__ : Dict = WavaVecaProcessor.from_pretrained("valid_org/test-processor-org" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(a , getattr(new_processor.feature_extractor , a ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def A_ ( self : Any ) ->int:
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
SCREAMING_SNAKE_CASE__ : Any = CustomFeatureExtractor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ : Optional[Any] = os.path.join(a , "vocab.txt" )
with open(a , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ : str = CustomTokenizer(a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = CustomProcessor(a , a )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f"""{USER}/test-dynamic-processor""" , token=self._token )
SCREAMING_SNAKE_CASE__ : str = Repository(a , clone_from=f"""{USER}/test-dynamic-processor""" , token=self._token )
processor.save_pretrained(a )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(a , "tokenizer_config.json" ) ) as f:
SCREAMING_SNAKE_CASE__ : str = json.load(a )
self.assertDictEqual(
tokenizer_config["auto_map"] , {
"AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(a , "custom_feature_extraction.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(a , "custom_tokenization.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(a , "custom_processing.py" ) ) )
repo.push_to_hub()
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained(f"""{USER}/test-dynamic-processor""" , trust_remote_code=a )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , "CustomProcessor" )
| 26 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase :Optional[Any] = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :Optional[int] = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
__lowercase :Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 701 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = ["image_processor", "tokenizer"]
snake_case_ = "CLIPImageProcessor"
snake_case_ = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : Any , a : List[Any]=None , a : Any=None , **a : int ) ->int:
SCREAMING_SNAKE_CASE__ : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , a , )
SCREAMING_SNAKE_CASE__ : List[Any] = kwargs.pop("feature_extractor" )
SCREAMING_SNAKE_CASE__ : int = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(a , a )
def __call__( self : Tuple , a : Tuple=None , a : Union[str, Any]=None , a : List[str]=None , **a : Optional[Any] ) ->Optional[Any]:
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
SCREAMING_SNAKE_CASE__ : str = self.tokenizer(a , return_tensors=a , **a )
if images is not None:
SCREAMING_SNAKE_CASE__ : int = self.image_processor(a , return_tensors=a , **a )
if text is not None and images is not None:
SCREAMING_SNAKE_CASE__ : Tuple = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a ) , tensor_type=a )
def A_ ( self : Optional[int] , *a : Any , **a : List[str] ) ->Any:
return self.tokenizer.batch_decode(*a , **a )
def A_ ( self : Any , *a : Optional[int] , **a : Dict ) ->Any:
return self.tokenizer.decode(*a , **a )
@property
def A_ ( self : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer.model_input_names
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def A_ ( self : Optional[int] ) ->List[Any]:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , a , )
return self.image_processor_class
@property
def A_ ( self : Dict ) ->str:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , a , )
return self.image_processor
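# Usage sketch for the processor above (the checkpoint name is an assumption,
# not taken from this file):
#   from transformers import CLIPProcessor
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = processor(text=["a photo of a cat"], images=image,
#                      return_tensors="pt", padding=True)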
| 26 | 0 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = old_name
if "patch_embed" in old_name:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = old_name.split("." )
if layer == "0":
SCREAMING_SNAKE_CASE__ : Tuple = old_name.replace("0" , "convolution1" )
elif layer == "1":
SCREAMING_SNAKE_CASE__ : int = old_name.replace("1" , "batchnorm_before" )
elif layer == "3":
SCREAMING_SNAKE_CASE__ : List[str] = old_name.replace("3" , "convolution2" )
else:
SCREAMING_SNAKE_CASE__ : List[str] = old_name.replace("4" , "batchnorm_after" )
if "network" in old_name and re.search(r"\d\.\d" , _lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = r"\b\d{2}\b"
if bool(re.search(_lowerCamelCase , _lowerCamelCase ) ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = re.search(r"\d\.\d\d." , _lowerCamelCase ).group()
else:
SCREAMING_SNAKE_CASE__ : str = re.search(r"\d\.\d." , _lowerCamelCase ).group()
if int(match[0] ) < 6:
SCREAMING_SNAKE_CASE__ : Tuple = old_name.replace(_lowerCamelCase , "" )
SCREAMING_SNAKE_CASE__ : List[Any] = trimmed_name.replace("network" , match[0] + ".meta4D_layers.blocks." + match[2:-1] )
SCREAMING_SNAKE_CASE__ : Optional[Any] = "intermediate_stages." + trimmed_name
else:
SCREAMING_SNAKE_CASE__ : str = old_name.replace(_lowerCamelCase , "" )
if int(match[2] ) < num_meta4D_last_stage:
SCREAMING_SNAKE_CASE__ : str = trimmed_name.replace("network" , "meta4D_layers.blocks." + match[2] )
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = str(int(match[2] ) - num_meta4D_last_stage )
SCREAMING_SNAKE_CASE__ : List[str] = trimmed_name.replace("network" , "meta3D_layers.blocks." + layer_index )
if "norm1" in old_name:
SCREAMING_SNAKE_CASE__ : Optional[int] = trimmed_name.replace("norm1" , "layernorm1" )
elif "norm2" in old_name:
SCREAMING_SNAKE_CASE__ : List[Any] = trimmed_name.replace("norm2" , "layernorm2" )
elif "fc1" in old_name:
SCREAMING_SNAKE_CASE__ : Dict = trimmed_name.replace("fc1" , "linear_in" )
elif "fc2" in old_name:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = trimmed_name.replace("fc2" , "linear_out" )
SCREAMING_SNAKE_CASE__ : Any = "last_stage." + trimmed_name
elif "network" in old_name and re.search(r".\d." , _lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = old_name.replace("network" , "intermediate_stages" )
if "fc" in new_name:
SCREAMING_SNAKE_CASE__ : Dict = new_name.replace("fc" , "convolution" )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = new_name.replace("norm1" , "batchnorm_before" )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
SCREAMING_SNAKE_CASE__ : Optional[int] = new_name.replace("norm2" , "batchnorm_after" )
if "proj" in new_name:
SCREAMING_SNAKE_CASE__ : Dict = new_name.replace("proj" , "projection" )
if "dist_head" in new_name:
SCREAMING_SNAKE_CASE__ : Optional[Any] = new_name.replace("dist_head" , "distillation_classifier" )
elif "head" in new_name:
SCREAMING_SNAKE_CASE__ : int = new_name.replace("head" , "classifier" )
elif "patch_embed" in new_name:
SCREAMING_SNAKE_CASE__ : List[Any] = "efficientformer." + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
SCREAMING_SNAKE_CASE__ : Optional[Any] = new_name.replace("norm" , "layernorm" )
SCREAMING_SNAKE_CASE__ : Dict = "efficientformer." + new_name
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "efficientformer.encoder." + new_name
return new_name
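# Illustrative examples of the renaming rules above (hypothetical keys):
#   "patch_embed.0.weight" -> "efficientformer.patch_embed.convolution1.weight"
#   "patch_embed.1.weight" -> "efficientformer.patch_embed.batchnorm_before.weight"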
def UpperCAmelCase ( _lowerCamelCase : Dict , _lowerCamelCase : Optional[int] ):
'''simple docstring'''
for key in checkpoint.copy().keys():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = checkpoint.pop(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[str] = val
return checkpoint
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg"
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return image
def UpperCAmelCase ( _lowerCamelCase : Path , _lowerCamelCase : Path , _lowerCamelCase : Path , _lowerCamelCase : bool ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = torch.load(_lowerCamelCase , map_location="cpu" )["model"]
SCREAMING_SNAKE_CASE__ : Tuple = EfficientFormerConfig.from_json_file(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Tuple = EfficientFormerForImageClassificationWithTeacher(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = "_".join(checkpoint_path.split("/" )[-1].split("." )[0].split("_" )[:-1] )
SCREAMING_SNAKE_CASE__ : Dict = config.depths[-1] - config.num_metaad_blocks + 1
SCREAMING_SNAKE_CASE__ : Optional[Any] = convert_torch_checkpoint(_lowerCamelCase , _lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE__ : str = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
# prepare image
SCREAMING_SNAKE_CASE__ : int = prepare_img()
SCREAMING_SNAKE_CASE__ : List[Any] = 256
SCREAMING_SNAKE_CASE__ : int = 224
SCREAMING_SNAKE_CASE__ : Dict = EfficientFormerImageProcessor(
size={"shortest_edge": image_size} , crop_size={"height": crop_size, "width": crop_size} , resample=pillow_resamplings["bicubic"] , )
SCREAMING_SNAKE_CASE__ : List[str] = processor(images=_lowerCamelCase , return_tensors="pt" ).pixel_values
# original processing pipeline
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Compose(
[
Resize(_lowerCamelCase , interpolation=pillow_resamplings["bicubic"] ),
CenterCrop(_lowerCamelCase ),
ToTensor(),
Normalize(_lowerCamelCase , _lowerCamelCase ),
] )
SCREAMING_SNAKE_CASE__ : str = image_transforms(_lowerCamelCase ).unsqueeze(0 )
assert torch.allclose(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : str = model(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : int = outputs.logits
SCREAMING_SNAKE_CASE__ : Tuple = (1, 1_000)
if "l1" in model_name:
SCREAMING_SNAKE_CASE__ : Dict = torch.Tensor(
[-0.1_3_1_2, 0.4_3_5_3, -1.0_4_9_9, -0.5_1_2_4, 0.4_1_8_3, -0.6_7_9_3, -1.3_7_7_7, -0.0_8_9_3, -0.7_3_5_8, -2.4_3_2_8] )
assert torch.allclose(logits[0, :10] , _lowerCamelCase , atol=1E-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
SCREAMING_SNAKE_CASE__ : int = torch.Tensor(
[-1.3_1_5_0, -1.5_4_5_6, -1.2_5_5_6, -0.8_4_9_6, -0.7_1_2_7, -0.7_8_9_7, -0.9_7_2_8, -0.3_0_5_2, 0.3_7_5_1, -0.3_1_2_7] )
assert torch.allclose(logits[0, :10] , _lowerCamelCase , atol=1E-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.Tensor(
[-1.0_2_8_3, -1.4_1_3_1, -0.5_6_4_4, -1.3_1_1_5, -0.5_7_8_5, -1.2_0_4_9, -0.7_5_2_8, 0.1_9_9_2, -0.3_8_2_2, -0.0_8_7_8] )
assert logits.shape == expected_shape
else:
raise ValueError(
f"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" )
# Save Checkpoints
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
print(f"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
processor.save_pretrained(_lowerCamelCase )
print(f"""Processor successfuly saved at {pytorch_dump_path}""" )
if push_to_hub:
print("Pushing model to the hub..." )
model.push_to_hub(
repo_id=f"""Bearnardd/{pytorch_dump_path}""" , commit_message="Add model" , use_temp_dir=_lowerCamelCase , )
processor.push_to_hub(
repo_id=f"""Bearnardd/{pytorch_dump_path}""" , commit_message="Add image processor" , use_temp_dir=_lowerCamelCase , )
if __name__ == "__main__":
__lowercase :int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path",
default=None,
type=str,
required=True,
help="Path to EfficientFormer pytorch checkpoint.",
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for EfficientFormer model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
parser.set_defaults(push_to_hub=True)
__lowercase :List[Any] = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 702 |
import sys
from collections import defaultdict
class _a :
"""simple docstring"""
def __init__( self : Any ) ->Dict:
SCREAMING_SNAKE_CASE__ : Tuple = []
def A_ ( self : int , a : List[str] ) ->Dict:
return self.node_position[vertex]
def A_ ( self : Optional[Any] , a : Any , a : List[str] ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : str = pos
def A_ ( self : List[Any] , a : List[str] , a : Dict , a : Dict , a : List[Any] ) ->Optional[int]:
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
SCREAMING_SNAKE_CASE__ : Dict = 2 * start + 1
else:
SCREAMING_SNAKE_CASE__ : Tuple = 2 * start + 2
if heap[smallest_child] < heap[start]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : int = heap[smallest_child], positions[smallest_child]
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = (
heap[start],
positions[start],
)
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = temp, tempa
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , a )
self.top_to_bottom(a , a , a , a )
def A_ ( self : Union[str, Any] , a : Tuple , a : Tuple , a : Union[str, Any] , a : List[Any] ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] = position[index]
while index != 0:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
SCREAMING_SNAKE_CASE__ : List[Any] = heap[parent]
SCREAMING_SNAKE_CASE__ : str = position[parent]
self.set_position(position[parent] , a )
else:
SCREAMING_SNAKE_CASE__ : int = val
SCREAMING_SNAKE_CASE__ : Optional[Any] = temp
self.set_position(a , a )
break
SCREAMING_SNAKE_CASE__ : Optional[int] = parent
else:
SCREAMING_SNAKE_CASE__ : int = val
SCREAMING_SNAKE_CASE__ : List[str] = temp
self.set_position(a , 0 )
def A_ ( self : Union[str, Any] , a : int , a : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[str] = len(a ) // 2 - 1
for i in range(a , -1 , -1 ):
self.top_to_bottom(a , a , len(a ) , a )
def A_ ( self : Dict , a : List[Any] , a : Dict ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Any = positions[0]
SCREAMING_SNAKE_CASE__ : Optional[int] = sys.maxsize
self.top_to_bottom(a , 0 , len(a ) , a )
return temp
def UpperCAmelCase ( _lowerCamelCase : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = Heap()
SCREAMING_SNAKE_CASE__ : Any = [0] * len(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Any = [-1] * len(_lowerCamelCase ) # neighboring tree vertex of each selected vertex
# Minimum distance from each vertex not yet in the tree to its nearest
# neighbor inside the partial tree built so far
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [] # Heap of Distance of vertices from their neighboring vertex
SCREAMING_SNAKE_CASE__ : str = []
for vertex in range(len(_lowerCamelCase ) ):
distance_tv.append(sys.maxsize )
positions.append(_lowerCamelCase )
heap.node_position.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = []
SCREAMING_SNAKE_CASE__ : int = 1
SCREAMING_SNAKE_CASE__ : int = sys.maxsize
for neighbor, distance in adjacency_list[0]:
SCREAMING_SNAKE_CASE__ : int = 0
SCREAMING_SNAKE_CASE__ : List[str] = distance
heap.heapify(_lowerCamelCase , _lowerCamelCase )
for _ in range(1 , len(_lowerCamelCase ) ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = heap.delete_minimum(_lowerCamelCase , _lowerCamelCase )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(_lowerCamelCase )]
):
SCREAMING_SNAKE_CASE__ : Any = distance
heap.bottom_to_top(
_lowerCamelCase , heap.get_position(_lowerCamelCase ) , _lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : str = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
    # < --------- Prim's Algorithm --------- >
__lowercase :Union[str, Any] = int(input("Enter number of edges: ").strip())
__lowercase :Dict = defaultdict(list)
for _ in range(edges_number):
__lowercase :Any = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
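    # Minimal non-interactive sketch (hedged: it bypasses the input() loop above
    # and assumes the same [neighbor, weight] adjacency-list shape):
    #
    #     example_graph = defaultdict(list)
    #     for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 3)]:
    #         example_graph[u].append([v, w])
    #         example_graph[v].append([u, w])
    #     print(prisms_algorithm(example_graph))  # expected MST edges: [(0, 1), (1, 2)]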
| 26 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase :Tuple = {
"configuration_table_transformer": [
"TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TableTransformerConfig",
"TableTransformerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :str = [
"TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TableTransformerForObjectDetection",
"TableTransformerModel",
"TableTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
__lowercase :str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 703 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
__lowercase :List[Any] = logging.get_logger(__name__)
__lowercase :Optional[int] = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "longformer"
def __init__( self : List[str] , a : Union[List[int], int] = 5_12 , a : int = 2 , a : int = 1 , a : int = 0 , a : int = 2 , a : int = 3_05_22 , a : int = 7_68 , a : int = 12 , a : int = 12 , a : int = 30_72 , a : str = "gelu" , a : float = 0.1 , a : float = 0.1 , a : int = 5_12 , a : int = 2 , a : float = 0.02 , a : float = 1E-12 , a : bool = False , **a : Dict , ) ->Tuple:
super().__init__(pad_token_id=a , **a )
SCREAMING_SNAKE_CASE__ : int = attention_window
SCREAMING_SNAKE_CASE__ : Any = sep_token_id
SCREAMING_SNAKE_CASE__ : str = bos_token_id
SCREAMING_SNAKE_CASE__ : List[str] = eos_token_id
SCREAMING_SNAKE_CASE__ : List[str] = vocab_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE__ : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE__ : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE__ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : str = type_vocab_size
SCREAMING_SNAKE_CASE__ : Any = initializer_range
SCREAMING_SNAKE_CASE__ : List[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Any = onnx_export
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : int , a : "PretrainedConfig" , a : str = "default" , a : "List[PatchingSpec]" = None ) ->str:
super().__init__(a , a , a )
SCREAMING_SNAKE_CASE__ : Any = True
@property
def A_ ( self : int ) ->Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE__ : int = {0: "batch", 1: "choice", 2: "sequence"}
else:
SCREAMING_SNAKE_CASE__ : str = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("global_attention_mask", dynamic_axis),
] )
@property
def A_ ( self : Optional[Any] ) ->Mapping[str, Mapping[int, str]]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = super().outputs
if self.task == "default":
SCREAMING_SNAKE_CASE__ : List[str] = {0: "batch"}
return outputs
@property
def A_ ( self : str ) ->float:
return 1E-4
@property
def A_ ( self : Any ) ->int:
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14 )
def A_ ( self : str , a : "PreTrainedTokenizerBase" , a : int = -1 , a : int = -1 , a : bool = False , a : Optional[TensorType] = None , ) ->Mapping[str, Any]:
SCREAMING_SNAKE_CASE__ : Tuple = super().generate_dummy_inputs(
preprocessor=a , batch_size=a , seq_length=a , is_pair=a , framework=a )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
SCREAMING_SNAKE_CASE__ : Any = torch.zeros_like(inputs["input_ids"] )
# make every second token global
SCREAMING_SNAKE_CASE__ : str = 1
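        # Equivalent upstream statement (an assumption from the comment above, since
        # the assignment target is not shown): inputs["global_attention_mask"][:, ::2] = 1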
return inputs
| 26 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase :Union[str, Any] = logging.get_logger(__name__)
__lowercase :Any = {
"facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/config.json",
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "xglm"
snake_case_ = ["past_key_values"]
snake_case_ = {
"num_attention_heads": "attention_heads",
"hidden_size": "d_model",
"num_hidden_layers": "num_layers",
}
def __init__( self : Union[str, Any] , a : int=25_60_08 , a : str=20_48 , a : Union[str, Any]=10_24 , a : str=40_96 , a : Optional[Any]=24 , a : Union[str, Any]=16 , a : Optional[Any]="gelu" , a : List[Any]=0.1 , a : Dict=0.1 , a : Any=0.0 , a : Optional[int]=0.0 , a : Union[str, Any]=0.02 , a : List[Any]=True , a : Optional[Any]=True , a : Union[str, Any]=2 , a : str=1 , a : Union[str, Any]=0 , a : List[Any]=2 , **a : Any , ) ->Tuple:
SCREAMING_SNAKE_CASE__ : str = vocab_size
SCREAMING_SNAKE_CASE__ : str = max_position_embeddings
SCREAMING_SNAKE_CASE__ : str = d_model
SCREAMING_SNAKE_CASE__ : Any = ffn_dim
SCREAMING_SNAKE_CASE__ : str = num_layers
SCREAMING_SNAKE_CASE__ : List[Any] = attention_heads
SCREAMING_SNAKE_CASE__ : Optional[int] = activation_function
SCREAMING_SNAKE_CASE__ : Optional[int] = dropout
SCREAMING_SNAKE_CASE__ : Union[str, Any] = attention_dropout
SCREAMING_SNAKE_CASE__ : Union[str, Any] = activation_dropout
SCREAMING_SNAKE_CASE__ : Optional[int] = layerdrop
SCREAMING_SNAKE_CASE__ : int = init_std
SCREAMING_SNAKE_CASE__ : str = scale_embedding # scale factor will be sqrt(d_model) if True
SCREAMING_SNAKE_CASE__ : str = use_cache
super().__init__(
pad_token_id=a , bos_token_id=a , eos_token_id=a , decoder_start_token_id=a , **a , )
| 704 |
def UpperCAmelCase ( _lowerCamelCase : int = 4_000_000 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = [0, 1]
SCREAMING_SNAKE_CASE__ : List[Any] = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
for j in range(len(_lowerCamelCase ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(f"{solution() = }")
| 26 | 0 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = CTRLTokenizer
snake_case_ = False
snake_case_ = False
def A_ ( self : Optional[Any] ) ->int:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE__ : List[Any] = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = dict(zip(a , range(len(a ) ) ) )
SCREAMING_SNAKE_CASE__ : List[str] = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
SCREAMING_SNAKE_CASE__ : Optional[Any] = {"unk_token": "<unk>"}
SCREAMING_SNAKE_CASE__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
SCREAMING_SNAKE_CASE__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(a ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(a ) )
def A_ ( self : str , **a : List[str] ) ->List[Any]:
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **a )
def A_ ( self : int , a : Any ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = "adapt react readapt apt"
SCREAMING_SNAKE_CASE__ : List[Any] = "adapt react readapt apt"
return input_text, output_text
def A_ ( self : Union[str, Any] ) ->List[Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
SCREAMING_SNAKE_CASE__ : List[Any] = "adapt react readapt apt"
SCREAMING_SNAKE_CASE__ : Tuple = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.tokenize(a )
self.assertListEqual(a , a )
SCREAMING_SNAKE_CASE__ : int = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE__ : Optional[int] = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , a )
| 705 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class _a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[int] , a : Any , a : bool = True , a : Dict[str, int] = None , a : int = 32 , a : bool = True , a : Union[int, float] = 1 / 2_55 , a : bool = True , a : bool = True , a : Optional[Union[float, List[float]]] = [0.4814_5466, 0.457_8275, 0.4082_1073] , a : Optional[Union[float, List[float]]] = [0.2686_2954, 0.2613_0258, 0.2757_7711] , a : bool = True , a : Any=7 , a : str=30 , a : Dict=4_00 , a : Optional[int]=3 , ) ->int:
SCREAMING_SNAKE_CASE__ : int = parent
SCREAMING_SNAKE_CASE__ : Dict = do_resize
SCREAMING_SNAKE_CASE__ : List[str] = size if size is not None else {"shortest_edge": 2_88}
SCREAMING_SNAKE_CASE__ : List[Any] = size_divisor
SCREAMING_SNAKE_CASE__ : List[Any] = do_rescale
SCREAMING_SNAKE_CASE__ : Tuple = rescale_factor
SCREAMING_SNAKE_CASE__ : Optional[int] = do_normalize
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[int] = image_mean
SCREAMING_SNAKE_CASE__ : Dict = image_std
SCREAMING_SNAKE_CASE__ : List[str] = do_pad
SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE__ : int = num_channels
SCREAMING_SNAKE_CASE__ : Optional[int] = min_resolution
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max_resolution
def A_ ( self : List[str] ) ->Tuple:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def A_ ( self : int , a : Optional[int] , a : Union[str, Any]=False ) ->Optional[Any]:
if not batched:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.size["shortest_edge"]
SCREAMING_SNAKE_CASE__ : Dict = image_inputs[0]
if isinstance(a , Image.Image ):
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = image.size
else:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[str] = image.shape[1], image.shape[2]
SCREAMING_SNAKE_CASE__ : Any = size / min(a , a )
if h < w:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = size, scale * w
else:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[str] = scale * h, size
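            # Detection-style resize rule (the common COCO 800/1333 convention): with
            # the shortest edge scaled to `size`, cap the longer edge at size * 1333 / 800.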
SCREAMING_SNAKE_CASE__ : List[Any] = int((13_33 / 8_00) * size )
if max(a , a ) > max_size:
SCREAMING_SNAKE_CASE__ : List[Any] = max_size / max(a , a )
SCREAMING_SNAKE_CASE__ : int = newh * scale
SCREAMING_SNAKE_CASE__ : Optional[int] = neww * scale
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = int(newh + 0.5 ), int(neww + 0.5 )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
SCREAMING_SNAKE_CASE__ : List[Any] = []
for image in image_inputs:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE__ : Tuple = max(a , key=lambda a : item[0] )[0]
SCREAMING_SNAKE_CASE__ : Tuple = max(a , key=lambda a : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = BridgeTowerImageProcessor if is_vision_available() else None
def A_ ( self : List[Any] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Any = BridgeTowerImageProcessingTester(self )
@property
def A_ ( self : Optional[int] ) ->Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def A_ ( self : Tuple ) ->str:
SCREAMING_SNAKE_CASE__ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , "image_mean" ) )
self.assertTrue(hasattr(a , "image_std" ) )
self.assertTrue(hasattr(a , "do_normalize" ) )
self.assertTrue(hasattr(a , "do_resize" ) )
self.assertTrue(hasattr(a , "size" ) )
self.assertTrue(hasattr(a , "size_divisor" ) )
def A_ ( self : List[Any] ) ->List[Any]:
pass
def A_ ( self : Tuple ) ->Optional[Any]:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : int = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : Optional[int] ) ->Any:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Tuple = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : str ) ->Optional[int]:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Any = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 26 | 0 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : Any , a : NestedDataStructureLike[PathLike] , a : Optional[NamedSplit] = None , a : Optional[Features] = None , a : str = None , a : bool = False , a : bool = False , a : Optional[str] = None , a : Optional[int] = None , **a : Tuple , ) ->Any:
super().__init__(
a , split=a , features=a , cache_dir=a , keep_in_memory=a , streaming=a , num_proc=a , **a , )
SCREAMING_SNAKE_CASE__ : int = field
SCREAMING_SNAKE_CASE__ : int = path_or_paths if isinstance(a , a ) else {self.split: path_or_paths}
SCREAMING_SNAKE_CASE__ : Tuple = Json(
cache_dir=a , data_files=a , features=a , field=a , **a , )
def A_ ( self : List[str] ) ->Any:
# Build iterable dataset
if self.streaming:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
SCREAMING_SNAKE_CASE__ : str = None
SCREAMING_SNAKE_CASE__ : List[Any] = None
self.builder.download_and_prepare(
download_config=a , download_mode=a , verification_mode=a , base_path=a , num_proc=self.num_proc , )
SCREAMING_SNAKE_CASE__ : List[Any] = self.builder.as_dataset(
split=self.split , verification_mode=a , in_memory=self.keep_in_memory )
return dataset
class _a :
"""simple docstring"""
def __init__( self : Optional[Any] , a : Dataset , a : Union[PathLike, BinaryIO] , a : Optional[int] = None , a : Optional[int] = None , **a : Any , ) ->Any:
if num_proc is not None and num_proc <= 0:
raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" )
SCREAMING_SNAKE_CASE__ : Tuple = dataset
SCREAMING_SNAKE_CASE__ : List[str] = path_or_buf
SCREAMING_SNAKE_CASE__ : Optional[int] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
SCREAMING_SNAKE_CASE__ : Any = num_proc
SCREAMING_SNAKE_CASE__ : Optional[int] = "utf-8"
SCREAMING_SNAKE_CASE__ : Any = to_json_kwargs
def A_ ( self : Union[str, Any] ) ->int:
SCREAMING_SNAKE_CASE__ : Tuple = self.to_json_kwargs.pop("path_or_buf" , a )
SCREAMING_SNAKE_CASE__ : List[str] = self.to_json_kwargs.pop("orient" , "records" )
SCREAMING_SNAKE_CASE__ : Any = self.to_json_kwargs.pop("lines" , True if orient == "records" else False )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.to_json_kwargs.pop("index" , False if orient in ["split", "table"] else True )
SCREAMING_SNAKE_CASE__ : Any = self.to_json_kwargs.pop("compression" , a )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(f"""`datasets` currently does not support {compression} compression""" )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , "wb" , compression=a ) as buffer:
SCREAMING_SNAKE_CASE__ : Optional[int] = self._write(file_obj=a , orient=a , lines=a , index=a , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
f"""The compression parameter is not supported when writing to a buffer, but compression={compression}"""
" was passed. Please provide a local path instead." )
SCREAMING_SNAKE_CASE__ : Any = self._write(
file_obj=self.path_or_buf , orient=a , lines=a , index=a , **self.to_json_kwargs )
return written
def A_ ( self : List[Any] , a : int ) ->str:
SCREAMING_SNAKE_CASE__ : Dict = args
SCREAMING_SNAKE_CASE__ : str = query_table(
table=self.dataset.data , key=slice(a , offset + self.batch_size ) , indices=self.dataset._indices , )
SCREAMING_SNAKE_CASE__ : Dict = batch.to_pandas().to_json(
path_or_buf=a , orient=a , lines=a , index=a , **a )
if not json_str.endswith("\n" ):
json_str += "\n"
return json_str.encode(self.encoding )
def A_ ( self : str , a : BinaryIO , a : Optional[Any] , a : List[Any] , a : str , **a : Optional[int] , ) ->int:
SCREAMING_SNAKE_CASE__ : List[str] = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ):
SCREAMING_SNAKE_CASE__ : int = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(a )
else:
SCREAMING_SNAKE_CASE__ : Any = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , a , a )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ):
written += file_obj.write(a )
return written
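# Minimal usage sketch (hedged: relies only on the public `datasets` API that this
# module backs, where `Dataset.to_json` routes through the writer above):
#
#     from datasets import Dataset
#     Dataset.from_dict({"a": [1, 2, 3]}).to_json("data.jsonl", lines=True, orient="records")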
| 706 |
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : bool = False ):
'''simple docstring'''
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
raise ValueError(
"Warning: upper bound of deterministic test is exceeded. "
"Pass allow_probable=True to allow probabilistic test. "
"A return value of True indicates a probable prime." )
# array bounds provided by analysis
SCREAMING_SNAKE_CASE__ : List[str] = [
2_047,
1_373_653,
25_326_001,
3_215_031_751,
2_152_302_898_747,
3_474_749_660_383,
341_550_071_728_321,
1,
3_825_123_056_546_413_051,
1,
1,
318_665_857_834_031_151_167_461,
3_317_044_064_679_887_385_961_981,
]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(_lowerCamelCase , 1 ):
if n < _p:
# then we have our last prime to check
SCREAMING_SNAKE_CASE__ : Dict = primes[:idx]
break
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = n - 1, 0
    # break up n - 1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
SCREAMING_SNAKE_CASE__ : str = False
for r in range(_lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = pow(_lowerCamelCase , d * 2**r , _lowerCamelCase )
            # see the article's analysis for an explanation of m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
SCREAMING_SNAKE_CASE__ : str = True
# this loop will not determine compositeness
break
if pr:
continue
        # if pr is False, the above loop never evaluated to true,
        # and n MUST be composite
return False
return True
def UpperCAmelCase ( ):
'''simple docstring'''
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838_201 )
assert miller_rabin(838_207 )
# 1_373_653
assert not miller_rabin(17_316_001 )
assert miller_rabin(17_316_017 )
# 25_326_001
assert not miller_rabin(3_078_386_641 )
assert miller_rabin(3_078_386_653 )
# 3_215_031_751
assert not miller_rabin(1_713_045_574_801 )
assert miller_rabin(1_713_045_574_819 )
# 2_152_302_898_747
assert not miller_rabin(2_779_799_728_307 )
assert miller_rabin(2_779_799_728_327 )
# 3_474_749_660_383
assert not miller_rabin(113_850_023_909_441 )
assert miller_rabin(113_850_023_909_527 )
# 341_550_071_728_321
assert not miller_rabin(1_275_041_018_848_804_351 )
assert miller_rabin(1_275_041_018_848_804_391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79_666_464_458_507_787_791_867 )
assert miller_rabin(79_666_464_458_507_787_791_951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552_840_677_446_647_897_660_333 )
assert miller_rabin(552_840_677_446_647_897_660_359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
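# Worked decomposition used by the witness loop above (a certainty, not an
# assumption): for n = 561, n - 1 = 560 = 35 * 2**4, so d = 35 and s = 4, and each
# base is tested via pow(base, 35 * 2**r, 561) for r in range(4).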
| 26 | 0 |
def UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : int ):
'''simple docstring'''
return [sentence[i : i + ngram_size] for i in range(len(_lowerCamelCase ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
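# Illustrative call (hedged: `create_ngram` is an assumed name for the function
# defined above, which slices the raw string into character-level n-grams):
#
#     create_ngram("abcdef", 3)  # -> ['abc', 'bcd', 'cde', 'def']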
| 707 |
import numpy
class _a :
"""simple docstring"""
def __init__( self : Optional[int] , a : numpy.ndarray , a : numpy.ndarray ) ->None:
SCREAMING_SNAKE_CASE__ : Any = input_array
        # Random initial weights are assigned where the first argument is the
        # number of nodes in the previous layer and the second argument is the
        # number of nodes in the next layer.
        # self.input_array.shape[1] represents the number of nodes in the input layer.
        # The first hidden layer consists of 4 nodes.
SCREAMING_SNAKE_CASE__ : int = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
SCREAMING_SNAKE_CASE__ : Dict = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
SCREAMING_SNAKE_CASE__ : List[Any] = numpy.random.rand(3 , 1 )
# Real output values provided.
SCREAMING_SNAKE_CASE__ : str = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
SCREAMING_SNAKE_CASE__ : Tuple = numpy.zeros(output_array.shape )
def A_ ( self : Union[str, Any] ) ->numpy.ndarray:
SCREAMING_SNAKE_CASE__ : List[Any] = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
SCREAMING_SNAKE_CASE__ : Optional[int] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
SCREAMING_SNAKE_CASE__ : int = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def A_ ( self : int ) ->None:
SCREAMING_SNAKE_CASE__ : Optional[int] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
SCREAMING_SNAKE_CASE__ : int = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def A_ ( self : int , a : numpy.ndarray , a : int , a : bool ) ->None:
for iteration in range(1 , iterations + 1 ):
SCREAMING_SNAKE_CASE__ : Dict = self.feedforward()
self.back_propagation()
if give_loss:
SCREAMING_SNAKE_CASE__ : int = numpy.mean(numpy.square(output - self.feedforward() ) )
print(f"""Iteration {iteration} Loss: {loss}""" )
def A_ ( self : Tuple , a : numpy.ndarray ) ->int:
SCREAMING_SNAKE_CASE__ : Optional[int] = input_arr
SCREAMING_SNAKE_CASE__ : Dict = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
SCREAMING_SNAKE_CASE__ : Any = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def UpperCAmelCase ( _lowerCamelCase : numpy.ndarray ):
'''simple docstring'''
return 1 / (1 + numpy.exp(-value ))
def UpperCAmelCase ( _lowerCamelCase : numpy.ndarray ):
'''simple docstring'''
return (value) * (1 - (value))
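# Note (a mathematical certainty): for s = sigmoid(x), ds/dx = s * (1 - s), which is
# why the derivative above is evaluated on the already-activated `value`.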
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
SCREAMING_SNAKE_CASE__ : Any = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
SCREAMING_SNAKE_CASE__ : List[Any] = TwoHiddenLayerNeuralNetwork(
input_array=_lowerCamelCase , output_array=_lowerCamelCase )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=_lowerCamelCase , iterations=10 , give_loss=_lowerCamelCase )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
| 26 | 0 |
from __future__ import annotations
from collections import deque
class _a :
"""simple docstring"""
def __init__( self : Tuple , a : list[str] ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : list[dict] = []
self.adlist.append(
{"value": "", "next_states": [], "fail_state": 0, "output": []} )
for keyword in keywords:
self.add_keyword(a )
self.set_fail_transitions()
def A_ ( self : int , a : int , a : str ) ->int | None:
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def A_ ( self : str , a : str ) ->None:
SCREAMING_SNAKE_CASE__ : int = 0
for character in keyword:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.find_next_state(a , a )
if next_state is None:
self.adlist.append(
{
"value": character,
"next_states": [],
"fail_state": 0,
"output": [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
SCREAMING_SNAKE_CASE__ : Optional[int] = len(self.adlist ) - 1
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = next_state
self.adlist[current_state]["output"].append(a )
def A_ ( self : Any ) ->None:
SCREAMING_SNAKE_CASE__ : deque = deque()
for node in self.adlist[0]["next_states"]:
q.append(a )
SCREAMING_SNAKE_CASE__ : Dict = 0
while q:
SCREAMING_SNAKE_CASE__ : Optional[Any] = q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(a )
SCREAMING_SNAKE_CASE__ : Dict = self.adlist[r]["fail_state"]
while (
self.find_next_state(a , self.adlist[child]["value"] ) is None
and state != 0
):
SCREAMING_SNAKE_CASE__ : Dict = self.adlist[state]["fail_state"]
SCREAMING_SNAKE_CASE__ : int = self.find_next_state(
a , self.adlist[child]["value"] )
if self.adlist[child]["fail_state"] is None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
SCREAMING_SNAKE_CASE__ : str = (
self.adlist[child]["output"]
+ self.adlist[self.adlist[child]["fail_state"]]["output"]
)
def A_ ( self : int , a : str ) ->dict[str, list[int]]:
        SCREAMING_SNAKE_CASE__ : dict = {}  # maps each keyword to a list of its occurrence indices
SCREAMING_SNAKE_CASE__ : str = 0
for i in range(len(a ) ):
while (
self.find_next_state(a , string[i] ) is None
and current_state != 0
):
SCREAMING_SNAKE_CASE__ : int = self.adlist[current_state]["fail_state"]
SCREAMING_SNAKE_CASE__ : Any = self.find_next_state(a , string[i] )
if next_state is None:
SCREAMING_SNAKE_CASE__ : str = 0
else:
SCREAMING_SNAKE_CASE__ : List[str] = next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
SCREAMING_SNAKE_CASE__ : Optional[int] = []
result[key].append(i - len(a ) + 1 )
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
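# Minimal search sketch (hedged: the class and method names below are assumed from
# context, since the originals are masked above):
#
#     automaton = Automaton(["what", "hat", "ver"])
#     automaton.search_in("whatever")
#     # -> {'what': [0], 'hat': [1], 'ver': [5]}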
| 708 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__lowercase :Tuple = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
__lowercase :str = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
__lowercase :List[Any] = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
"""simple docstring"""
def A_ ( self : List[Any] ) ->MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def A_ ( self : str , a : List[List[List[str]]] , a : List[List[str]] , a : int = 1 , a : int = 4 , ) ->Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=a , hypotheses=a , min_len=a , max_len=a )
}
| 26 | 0 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
__lowercase :List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : Tuple , a : WhisperForConditionalGeneration , a : WhisperProcessor , a : AutoencoderKL , a : CLIPTextModel , a : CLIPTokenizer , a : UNetaDConditionModel , a : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , a : StableDiffusionSafetyChecker , a : CLIPImageProcessor , ) ->Union[str, Any]:
super().__init__()
if safety_checker is None:
logger.warning(
f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
speech_model=a , speech_processor=a , vae=a , text_encoder=a , tokenizer=a , unet=a , scheduler=a , feature_extractor=a , )
def A_ ( self : List[Any] , a : Optional[Union[str, int]] = "auto" ) ->int:
if slice_size == "auto":
SCREAMING_SNAKE_CASE__ : List[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(a )
def A_ ( self : List[Any] ) ->str:
self.enable_attention_slicing(a )
@torch.no_grad()
def __call__( self : str , a : str , a : Tuple=1_60_00 , a : int = 5_12 , a : int = 5_12 , a : int = 50 , a : float = 7.5 , a : Optional[Union[str, List[str]]] = None , a : Optional[int] = 1 , a : float = 0.0 , a : Optional[torch.Generator] = None , a : Optional[torch.FloatTensor] = None , a : Optional[str] = "pil" , a : bool = True , a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , a : int = 1 , **a : List[Any] , ) ->Any:
SCREAMING_SNAKE_CASE__ : List[str] = self.speech_processor.feature_extractor(
a , return_tensors="pt" , sampling_rate=a ).input_features.to(self.device )
SCREAMING_SNAKE_CASE__ : str = self.speech_model.generate(a , max_length=48_00_00 )
SCREAMING_SNAKE_CASE__ : int = self.speech_processor.tokenizer.batch_decode(a , skip_special_tokens=a , normalize=a )[
0
]
if isinstance(a , a ):
SCREAMING_SNAKE_CASE__ : str = 1
elif isinstance(a , a ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = len(a )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(a )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(a , a ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(a )}.""" )
# get prompt text embeddings
SCREAMING_SNAKE_CASE__ : List[Any] = self.tokenizer(
a , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
SCREAMING_SNAKE_CASE__ : List[Any] = text_input_ids[:, : self.tokenizer.model_max_length]
SCREAMING_SNAKE_CASE__ : List[str] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
SCREAMING_SNAKE_CASE__ : int = text_embeddings.shape
SCREAMING_SNAKE_CASE__ : Dict = text_embeddings.repeat(1 , a , 1 )
SCREAMING_SNAKE_CASE__ : List[str] = text_embeddings.view(bs_embed * num_images_per_prompt , a , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
SCREAMING_SNAKE_CASE__ : Optional[int] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE__ : List[str]
if negative_prompt is None:
SCREAMING_SNAKE_CASE__ : Dict = [""] * batch_size
elif type(a ) is not type(a ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(a )} !="""
f""" {type(a )}.""" )
elif isinstance(a , a ):
SCREAMING_SNAKE_CASE__ : Any = [negative_prompt]
elif batch_size != len(a ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(a )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
" the batch size of `prompt`." )
else:
SCREAMING_SNAKE_CASE__ : List[str] = negative_prompt
SCREAMING_SNAKE_CASE__ : Optional[int] = text_input_ids.shape[-1]
SCREAMING_SNAKE_CASE__ : List[Any] = self.tokenizer(
a , padding="max_length" , max_length=a , truncation=a , return_tensors="pt" , )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
SCREAMING_SNAKE_CASE__ : str = uncond_embeddings.shape[1]
SCREAMING_SNAKE_CASE__ : Any = uncond_embeddings.repeat(1 , a , 1 )
SCREAMING_SNAKE_CASE__ : Any = uncond_embeddings.view(batch_size * num_images_per_prompt , a , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
SCREAMING_SNAKE_CASE__ : Optional[int] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
SCREAMING_SNAKE_CASE__ : Optional[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
SCREAMING_SNAKE_CASE__ : List[str] = torch.randn(a , generator=a , device="cpu" , dtype=a ).to(
self.device )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = torch.randn(a , generator=a , device=self.device , dtype=a )
else:
if latents.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(a )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
SCREAMING_SNAKE_CASE__ : Optional[int] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler; it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
SCREAMING_SNAKE_CASE__ : int = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
SCREAMING_SNAKE_CASE__ : Optional[Any] = {}
if accepts_eta:
SCREAMING_SNAKE_CASE__ : str = eta
for i, t in enumerate(self.progress_bar(a ) ):
# expand the latents if we are doing classifier free guidance
SCREAMING_SNAKE_CASE__ : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.scheduler.scale_model_input(a , a )
# predict the noise residual
SCREAMING_SNAKE_CASE__ : List[Any] = self.unet(a , a , encoder_hidden_states=a ).sample
# perform guidance
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE__ : Optional[Any] = noise_pred.chunk(2 )
SCREAMING_SNAKE_CASE__ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE__ : List[str] = self.scheduler.step(a , a , a , **a ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(a , a , a )
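        # 0.18215 is the Stable Diffusion VAE latent scaling factor; dividing by it
        # below undoes the scaling applied at encode time before decoding to pixels.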
SCREAMING_SNAKE_CASE__ : Any = 1 / 0.1_8215 * latents
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.vae.decode(a ).sample
SCREAMING_SNAKE_CASE__ : List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
SCREAMING_SNAKE_CASE__ : str = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE__ : int = self.numpy_to_pil(a )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=a , nsfw_content_detected=a )
| 709 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__lowercase :List[Any] = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any]=None ):
'''simple docstring'''
require_version(deps[pkg] , _lowerCamelCase )
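# Illustrative call (hedged: `dep_version_check` is the assumed public name of the
# helper above; it re-checks one pinned dependency on demand):
#
#     dep_version_check("tqdm")  # raises if the installed tqdm violates the pin in `deps`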
| 26 | 0 |
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # placeholder hooking into the shared ONNX pipeline tester mixin
    pass


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inpainting_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inpainting_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
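# Illustrative standalone sketch (added) of the slice-comparison pattern used in the tests
# above: freeze a small corner slice of the output once, then compare later runs against it
# with an absolute tolerance (the values below are stand-ins, not real pipeline output).
_images = np.random.RandomState(0).rand(1, 512, 512, 3)  # stand-in for pipeline output
_image_slice = _images[0, 255:258, 255:258, -1]  # 3x3 corner of the last channel
_expected_slice = _image_slice.flatten().copy()  # pretend these were frozen earlier
assert np.abs(_image_slice.flatten() - _expected_slice).max() < 1e-3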
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    """Return the maximum sum of any k consecutive elements, via a sliding window."""
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        # slide the window one step right: drop array[i], pick up array[i + k]
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod
    from random import randint

    testmod()
    array = [randint(-1_000, 1_000) for i in range(100)]
    k = randint(0, 110)
    print(f"The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}")
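    # Illustrative brute-force cross-check (added) for the sliding-window version above.
    def max_sum_brute_force(arr: list[int], window: int) -> int:
        return max(sum(arr[i : i + window]) for i in range(len(arr) - window + 1))

    sample = [1, -2, 3, 10, -4, 7, 2, -5]
    assert max_sum_in_array(sample, 4) == max_sum_brute_force(sample, 4) == 16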
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
"configuration_data2vec_text": [
"DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecTextConfig",
"Data2VecTextOnnxConfig",
],
"configuration_data2vec_vision": [
"DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecVisionConfig",
"Data2VecVisionOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
"DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecAudioForAudioFrameClassification",
"Data2VecAudioForCTC",
"Data2VecAudioForSequenceClassification",
"Data2VecAudioForXVector",
"Data2VecAudioModel",
"Data2VecAudioPreTrainedModel",
]
    _import_structure["modeling_data2vec_text"] = [
"DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecTextForCausalLM",
"Data2VecTextForMaskedLM",
"Data2VecTextForMultipleChoice",
"Data2VecTextForQuestionAnswering",
"Data2VecTextForSequenceClassification",
"Data2VecTextForTokenClassification",
"Data2VecTextModel",
"Data2VecTextPreTrainedModel",
]
    _import_structure["modeling_data2vec_vision"] = [
"DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecVisionForImageClassification",
"Data2VecVisionForMaskedImageModeling",
"Data2VecVisionForSemanticSegmentation",
"Data2VecVisionModel",
"Data2VecVisionPreTrainedModel",
]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
"TFData2VecVisionForImageClassification",
"TFData2VecVisionForSemanticSegmentation",
"TFData2VecVisionModel",
"TFData2VecVisionPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
__lowercase :int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
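# Illustrative sketch (added) of the lazy-import idea behind `_LazyModule`, using PEP 562's
# module-level __getattr__ (not the actual _LazyModule internals; the names are assumptions).
import importlib

_LAZY_ATTRS = {"Data2VecTextConfig": ".configuration_data2vec_text"}


def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")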
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """Divide and conquer: return the maximum value in nums[left:right + 1]."""
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
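    # Illustrative cross-check (added) of the recursion against the built-in max().
    nums = [3, -7, 12, 0, 5, 12, -1]
    assert find_max(nums, 0, len(nums) - 1) == max(nums) == 12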
from math import factorial
def combinations(n: int, k: int) -> int:
    """Return n choose k, the number of ways to choose k items from n."""
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
print(
"The number of five-card hands possible from a standard",
F"fifty-two card deck is: {combinations(52, 5)}\n",
)
print(
"If a class of 40 students must be arranged into groups of",
F"4 for group projects, there are {combinations(40, 4)} ways",
"to arrange them.\n",
)
print(
"If 10 teams are competing in a Formula One race, there",
F"are {combinations(10, 3)} ways that first, second and",
"third place can be awarded.",
)
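# Alternative multiplicative form of n choose k, added as an illustrative sketch:
# it avoids the three large factorials by letting common factors cancel step by step.
def combinations_multiplicative(n: int, k: int) -> int:
    k = min(k, n - k)  # use the symmetry C(n, k) == C(n, n - k)
    result = 1
    for i in range(k):
        result = result * (n - i) // (i + 1)  # exact integer at every step
    return result


assert combinations_multiplicative(52, 5) == combinations(52, 5) == 2_598_960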
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset(IterableDataset):
    """Tokenize the prompts once and yield each of them `n_copies` times."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria`: stop once every generation contains an EOF string."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Remove the last block of the code containing one of the EOF strings."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate `batch_size` completions per task and group them by task id."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    generations = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs
    )

    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=generations, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)
# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
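# Standalone illustration (added) of how the EOF-based truncation above behaves:
# everything from the last EOF marker onwards is dropped from a raw generation.
_generation = "    return a + b\nprint(add(1, 2))"
assert remove_last_block(_generation) == "    return a + b"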
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """A minimal vector class over the real numbers."""
    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)
def __len__( self : Dict ) ->int:
return len(self.__components )
def __str__( self : Union[str, Any] ) ->str:
return "(" + ",".join(map(a , self.__components ) ) + ")"
    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)
def zero_vector(dimension: int) -> Vector:
    """Return a vector of the given dimension filled with zeroes."""
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    """Return the unit basis vector with a 1 at index `pos`."""
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    """Compute scalar * x + y."""
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    """Return a random vector of size n with integer components in [a, b]."""
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
class Matrix:
    """A minimal matrix class with basic operations."""
    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans
    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None
    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("change_component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)
def square_zero_matrix(n: int) -> Matrix:
    """Return a square zero matrix of dimension n x n."""
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    """Return a random matrix with integer components in [a, b]."""
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
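# Quick usage check (added, illustrative) of the Vector API above: the angle between
# (1, 0) and (1, 1) is 45 degrees, and addition is componentwise.
v = Vector([1, 0])
w = Vector([1, 1])
assert abs(v.angle(w, deg=True) - 45.0) < 1e-9
assert (v + w).component(1) == 1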
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_upernet"] = [
"UperNetForSemanticSegmentation",
"UperNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
__lowercase :str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict_dataset(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        list_records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(list_records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
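# Minimal standalone sketch (added) of the schema inference exercised by
# test_uneven_records: Dataset.from_list takes its columns from the first record
# and fills values missing from later records with None.
example = Dataset.from_list([{"col_1": 1}, {"col_2": "x"}])
print(example.column_names)  # ['col_1'] - columns come from the first record
print(example[1])  # {'col_1': None} - missing values are filled with None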
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    """Load the CSV files, tokenize them and wrap each split in a tf.data.Dataset."""
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
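# Self-contained sketch (added) of the (features_dict, label) from_generator pattern used
# above, with toy data; the dtypes and shapes mirror the real code. Iterating the dataset
# yields a dict of ragged-length int tensors plus a scalar label.
def _gen_toy():
    toy_examples = [([101, 7, 102], 0), ([101, 8, 102], 1)]
    for ids, label in toy_examples:
        yield ({"input_ids": ids, "attention_mask": [1] * len(ids)}, label)


_toy_names = ["input_ids", "attention_mask"]
_toy_ds = tf.data.Dataset.from_generator(
    _gen_toy,
    ({k: tf.int32 for k in _toy_names}, tf.int64),
    ({k: tf.TensorShape([None]) for k in _toy_names}, tf.TensorShape([])),
)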
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """Arguments pointing to the data files and preprocessing options."""

    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: Optional[str] = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f" {key} = {value}")
                writer.write(f"{key} = {value}\n")

            results.update(result)

    return results
if __name__ == "__main__":
main()
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Find all the valid positions a knight can move to from the current position."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []

    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)

    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Check if the board (matrix) has been completely filled with non-zero values."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Helper function to solve the knight tour problem by backtracking."""
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0

    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find a solution for the knight tour problem on a board of size n."""
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
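    # Illustrative demo (added): solve and print an open knight's tour on a 5 x 5 board.
    solution = open_knight_tour(5)
    for row in solution:
        print(" ".join(f"{cell:2d}" for cell in row))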
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class ImageProcessor(BaseImageProcessor):
    # NOTE: the model-specific class name was lost in this snippet; the logic below is the
    # standard resize -> center-crop -> rescale -> normalize image-processor pipeline.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
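# Plain-NumPy sketch (added) of the rescale -> normalize stages in isolation, independent
# of the class above (IMAGENET_STANDARD_MEAN/STD are 0.5 per channel).
_image = np.random.randint(0, 256, size=(3, 224, 224)).astype(np.float32)
_rescaled = _image * (1 / 255)  # pixel values now in [0, 1]
_mean = np.array(IMAGENET_STANDARD_MEAN).reshape(3, 1, 1)
_std = np.array(IMAGENET_STANDARD_STD).reshape(3, 1, 1)
_normalized = (_rescaled - _mean) / _std  # now roughly in [-1, 1]
assert -1.0 <= _normalized.min() and _normalized.max() <= 1.0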
"""A binary search tree implementation with insert, search, remove and traversals."""
from collections.abc import Iterable
from typing import Any
class Node:
    """A tree node holding a value and links to its parent and children."""

    def __init__(self, value: int | None = None):
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None
def __repr__( self : int ) ->str:
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({f"""{self.value}""": (self.left, self.right)} , indent=1 )
class BinarySearchTree:
    def __init__(self, root: Node | None = None):
        self.root = root

    def __str__(self) -> str:
        return str(self.root)

    def __reassign_nodes(self, node: Node, new_children: Node | None) -> None:
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node: Node) -> bool:
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self) -> bool:
        return self.root is None

    def __insert(self, value) -> None:
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values) -> None:
        for value in values:
            self.__insert(value)

    def search(self, value) -> Node | None:
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node: Node | None = None) -> Node | None:
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node: Node | None = None) -> Node | None:
        if node is None:
            node = self.root
        if self.root is None:
            return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node

    def remove(self, value: int) -> None:
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(node.left)  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse(self, node: Node | None) -> Iterable:
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None):
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr: list, node: Node | None) -> None:
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k: int, node: Node) -> int:
        arr: list[int] = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]
def postorder(curr_node: Node | None) -> list[Node]:
    """Postorder traversal: left, right, self."""
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list


def binary_search_tree_example() -> None:
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the list in order traversal
    print(t)

    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")

    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")

    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore

    for i in testlist:
        t.remove(i)
        print(t)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
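    # Illustrative check (added): inorder traversal of a BST yields sorted values,
    # which is exactly why find_kth_smallest works.
    tree = BinarySearchTree()
    tree.insert(8, 3, 6, 1, 10, 14, 13, 4, 7)
    assert tree.find_kth_smallest(3, tree.root) == 4  # sorted: 1, 3, 4, 6, 7, 8, 10, 13, 14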
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
def binomial_coefficient(n: int, r: int) -> int:
    """Compute C(n, r) with Pascal's rule, using only O(r) space."""
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
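# Illustrative cross-check (added) against math.comb (Python 3.8+).
from math import comb

for n, r in [(10, 5), (52, 5), (0, 0)]:
    assert binomial_coefficient(n, r) == comb(n, r)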
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, and CI would report a failure.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
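# Illustrative usage (added) of the IGNORE_RESULT doctest flag registered above:
# marking an example with it makes the checker accept any output.
def _touch_database():
    """
    >>> _touch_database()  # doctest: +IGNORE_RESULT
    'some output we do not want to compare'
    """
    return "unpredictable output"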
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=5002,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
    def test_alt_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    """Slow, GPU-backed integration tests for AltDiffusion."""
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_alt_diffusion(self):
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
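# Minimal end-to-end sketch (added; not part of the test file) of the pipeline
# under test:
#
#     from diffusers import AltDiffusionPipeline
#
#     pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion").to("cuda")
#     image = pipe("A painting of a squirrel eating a burger").images[0]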
| 718 |
def solution(n: int = 1_000) -> int:
    '''Return the largest product a*b*c of a Pythagorean triplet with a + b + c == n.'''
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
if __name__ == "__main__":
print(f"{solution() = }")
| 26 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class LongformerConfig(PretrainedConfig):
    """Configuration for the Longformer model."""
    model_type = "longformer"
    def __init__(self, attention_window: Union[List[int], int] = 512, sep_token_id: int = 2, pad_token_id: int = 1, bos_token_id: int = 0, eos_token_id: int = 2, vocab_size: int = 30522, hidden_size: int = 768, num_hidden_layers: int = 12, num_attention_heads: int = 12, intermediate_size: int = 3072, hidden_act: str = "gelu", hidden_dropout_prob: float = 0.1, attention_probs_dropout_prob: float = 0.1, max_position_embeddings: int = 512, type_vocab_size: int = 2, initializer_range: float = 0.02, layer_norm_eps: float = 1e-12, onnx_export: bool = False, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig(OnnxConfig):
    """ONNX export configuration for Longformer."""
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs
    @property
    def atol_for_validation(self) -> float:
        return 1e-4
    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)
    def generate_dummy_inputs(self, tokenizer: "PreTrainedTokenizerBase", batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch
        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1
        return inputs
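

if __name__ == "__main__":
    # Illustrative usage (added; not part of the original module): a reduced
    # configuration with a narrower sliding attention window.
    demo_config = LongformerConfig(attention_window=256, num_hidden_layers=6)
    print(demo_config.attention_window, demo_config.num_hidden_layers)  # -> 256 6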
| 719 |
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    '''Sort `sequence` in place between indices `start` and `end` (inclusive).'''
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
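    # Illustrative demo (added; not part of the original file).
    data = [7, 3, 9, 1, 4]
    slowsort(data)
    print(data)  # -> [1, 3, 4, 7, 9]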
| 26 | 0 |
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class FeatureExtractionSavingTestMixin:
    """Shared serialization tests for feature extractors."""

    test_cast_dtype = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
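

if __name__ == "__main__":
    # Illustrative round trip (added; not part of the original mixin), using
    # the Wav2Vec2 feature extractor as a concrete example of the
    # serialization contract the tests above enforce.
    from transformers import Wav2Vec2FeatureExtractor

    first = Wav2Vec2FeatureExtractor()
    second = Wav2Vec2FeatureExtractor.from_dict(json.loads(first.to_json_string()))
    assert second.to_dict() == first.to_dict()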
| 720 |
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    '''Return True if num/den is a non-trivial "digit cancelling" fraction.'''
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    '''Collect all digit-cancelling fractions with `digit_len`-digit numerators.'''
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"""{num}/{den}""")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    '''Return the denominator, in lowest terms, of the product of all such fractions.'''
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
print(solution())
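    # Illustrative check (added; not in the original file): the four curious
    # two-digit fractions are 16/64, 19/95, 26/65 and 49/98; their product is
    # 1/100, hence the answer 100.
    assert fraction_list(2) == ["16/64", "19/95", "26/65", "49/98"]
    assert solution() == 100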
| 26 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
"susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}
class ErnieMConfig(PretrainedConfig):
    """Configuration for the ErnieM model."""
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
    def __init__(self, vocab_size: int = 250002, hidden_size: int = 768, num_hidden_layers: int = 12, num_attention_heads: int = 12, intermediate_size: int = 3072, hidden_act: str = "gelu", hidden_dropout_prob: float = 0.1, attention_probs_dropout_prob: float = 0.1, max_position_embeddings: int = 514, initializer_range: float = 0.02, pad_token_id: int = 1, layer_norm_eps: float = 1e-05, classifier_dropout=None, is_decoder=False, act_dropout=0.0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
| 721 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    """Tests for the zero-shot audio classification pipeline."""

    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )
        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )
        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
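# Minimal usage sketch (added; not part of the test file) of the pipeline
# exercised above:
#
#     from transformers import pipeline
#
#     classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#     preds = classifier(waveform, candidate_labels=["Sound of a dog", "Sound of rain"])
#     # -> a list of {"score": ..., "label": ...} dicts sorted by score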
| 26 | 0 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """Output class for the scheduler's step functions."""
    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor
class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """Variance-exploding stochastic differential equation (SDE) scheduler."""
    order = 1
    @register_to_config
    def __init__(self, num_train_timesteps: int = 2000, snr: float = 0.15, sigma_min: float = 0.01, sigma_max: float = 1348.0, sampling_eps: float = 1e-5, correct_steps: int = 1):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.timesteps = None
        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
return sample
    def set_timesteps(self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)
    def set_sigmas(self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)
        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])
    def get_adjacent_sigma(self, timesteps, t):
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
    def step_pred(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, generator: Optional[torch.Generator] = None, return_dict: bool = True) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )
        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)
        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)
    def step_correct(self, model_output: torch.FloatTensor, sample: torch.FloatTensor, generator: Optional[torch.Generator] = None, return_dict: bool = True) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)
        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(self, original_samples: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.FloatTensor) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples
def __len__( self : Dict ) ->Any:
return self.config.num_train_timesteps
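# Minimal predictor-corrector sampling sketch (added; follows the public
# diffusers API assumed by the method names above -- `model` stands for any
# score network returning an object with a `.sample` attribute):
#
#     scheduler = ScoreSdeVeScheduler()
#     scheduler.set_timesteps(num_inference_steps=100)
#     scheduler.set_sigmas(num_inference_steps=100)
#     sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         for _ in range(scheduler.config.correct_steps):
#             out = model(sample, t).sample
#             sample = scheduler.step_correct(out, sample).prev_sample
#         out = model(sample, t).sample
#         sample = scheduler.step_pred(out, t, sample).prev_sample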
| 700 |
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
SAMPLE_PROCESSOR_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_VOCAB = get_tests_dir("fixtures/vocab.json")
SAMPLE_PROCESSOR_CONFIG_DIR = get_tests_dir("fixtures")
class AutoProcessorTest(unittest.TestCase):
    """Tests for AutoProcessor resolution and registration."""
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
def A_ ( self : Any ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
self.assertIsInstance(a , a )
def A_ ( self : Union[str, Any] ) ->List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Dict = WavaVecaConfig()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
# save in new folder
model_config.save_pretrained(a )
processor.save_pretrained(a )
SCREAMING_SNAKE_CASE__ : str = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : int ) ->List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(a , os.path.join(a , a ) )
copyfile(a , os.path.join(a , "vocab.json" ) )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : List[Any] ) ->Tuple:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Optional[Any] = WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE__ : Tuple = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
SCREAMING_SNAKE_CASE__ : Any = WavaVecaProcessor(a , a )
# save in new folder
processor.save_pretrained(a )
# drop `processor_class` in tokenizer
with open(os.path.join(a , a ) , "r" ) as f:
SCREAMING_SNAKE_CASE__ : Optional[int] = json.load(a )
config_dict.pop("processor_class" )
with open(os.path.join(a , a ) , "w" ) as f:
f.write(json.dumps(a ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : List[str] ) ->Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Tuple = WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
SCREAMING_SNAKE_CASE__ : Optional[int] = WavaVecaProcessor(a , a )
# save in new folder
processor.save_pretrained(a )
# drop `processor_class` in feature extractor
with open(os.path.join(a , a ) , "r" ) as f:
SCREAMING_SNAKE_CASE__ : List[Any] = json.load(a )
config_dict.pop("processor_class" )
with open(os.path.join(a , a ) , "w" ) as f:
f.write(json.dumps(a ) )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : Union[str, Any] ) ->str:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : List[Any] = WavaVecaConfig(processor_class="Wav2Vec2Processor" )
model_config.save_pretrained(a )
# copy relevant files
copyfile(a , os.path.join(a , "vocab.json" ) )
            # create empty sample processor
with open(os.path.join(a , a ) , "w" ) as f:
f.write("{}" )
SCREAMING_SNAKE_CASE__ : Tuple = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : Optional[Any] ) ->Optional[int]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(a ):
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(a ):
SCREAMING_SNAKE_CASE__ : Any = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
SCREAMING_SNAKE_CASE__ : Dict = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
SCREAMING_SNAKE_CASE__ : int = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a , use_fast=a )
SCREAMING_SNAKE_CASE__ : List[Any] = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , "NewTokenizer" )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
def A_ ( self : Tuple ) ->List[Any]:
try:
AutoConfig.register("custom" , a )
AutoFeatureExtractor.register(a , a )
AutoTokenizer.register(a , slow_tokenizer_class=a )
AutoProcessor.register(a , a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(a ):
AutoProcessor.register(a , a )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE__ : List[str] = CustomFeatureExtractor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ : int = os.path.join(a , "vocab.txt" )
with open(a , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = CustomTokenizer(a )
SCREAMING_SNAKE_CASE__ : List[Any] = CustomProcessor(a , a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(a )
SCREAMING_SNAKE_CASE__ : Any = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def A_ ( self : Union[str, Any] ) ->int:
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = False
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = False
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "AutoFeatureExtractor"
snake_case_ = "AutoTokenizer"
snake_case_ = False
try:
AutoConfig.register("custom" , a )
AutoFeatureExtractor.register(a , a )
AutoTokenizer.register(a , slow_tokenizer_class=a )
AutoProcessor.register(a , a )
# If remote code is not set, the default is to use local classes.
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
SCREAMING_SNAKE_CASE__ : Tuple = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
SCREAMING_SNAKE_CASE__ : Any = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def A_ ( self : Optional[Any] ) ->Dict:
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(processor.__class__.__name__ , "BertTokenizerFast" )
def A_ ( self : Dict ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Dict = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext" )
self.assertEqual(processor.__class__.__name__ , "ConvNextImageProcessor" )
@is_staging_test
class ProcessorPushToHubTester(unittest.TestCase):
    """Push-to-hub tests for processors."""
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
    @classmethod
    def tearDownClass(cls):
try:
delete_repo(token=cls._token , repo_id="test-processor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-processor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-processor" )
except HTTPError:
pass
def A_ ( self : Dict ) ->Dict:
SCREAMING_SNAKE_CASE__ : Tuple = WavaVecaProcessor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(a , "test-processor" ) , push_to_hub=a , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ : Optional[int] = WavaVecaProcessor.from_pretrained(f"""{USER}/test-processor""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(a , getattr(new_processor.feature_extractor , a ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def A_ ( self : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = WavaVecaProcessor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(a , "test-processor-org" ) , push_to_hub=a , use_auth_token=self._token , organization="valid_org" , )
SCREAMING_SNAKE_CASE__ : Dict = WavaVecaProcessor.from_pretrained("valid_org/test-processor-org" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(a , getattr(new_processor.feature_extractor , a ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def A_ ( self : Any ) ->int:
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
SCREAMING_SNAKE_CASE__ : Any = CustomFeatureExtractor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ : Optional[Any] = os.path.join(a , "vocab.txt" )
with open(a , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ : str = CustomTokenizer(a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = CustomProcessor(a , a )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f"""{USER}/test-dynamic-processor""" , token=self._token )
SCREAMING_SNAKE_CASE__ : str = Repository(a , clone_from=f"""{USER}/test-dynamic-processor""" , token=self._token )
processor.save_pretrained(a )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(a , "tokenizer_config.json" ) ) as f:
SCREAMING_SNAKE_CASE__ : str = json.load(a )
self.assertDictEqual(
tokenizer_config["auto_map"] , {
"AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(a , "custom_feature_extraction.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(a , "custom_tokenization.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(a , "custom_processing.py" ) ) )
repo.push_to_hub()
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained(f"""{USER}/test-dynamic-processor""" , trust_remote_code=a )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , "CustomProcessor" )
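# Illustrative usage (added; not part of the test file) of the API under test:
#
#     from transformers import AutoProcessor
#
#     processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
#     # resolves to Wav2Vec2Processor (feature extractor + tokenizer) for this checkpoint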
| 26 | 0 |
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    """Helper that builds small Funnel configs and inputs for the tests below."""
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, block_sizes=[1, 1, 2], num_decoder_layers=1, d_model=32, n_head=4, d_head=8, d_inner=37, hidden_act="gelu_new", hidden_dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, max_position_embeddings=512, type_vocab_size=3, initializer_std=0.02, num_labels=3, num_choices=4, scope=None, base=False):
SCREAMING_SNAKE_CASE__ : Optional[Any] = parent
SCREAMING_SNAKE_CASE__ : Tuple = batch_size
SCREAMING_SNAKE_CASE__ : int = seq_length
SCREAMING_SNAKE_CASE__ : List[Any] = is_training
SCREAMING_SNAKE_CASE__ : List[str] = use_input_mask
SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_token_type_ids
SCREAMING_SNAKE_CASE__ : Optional[int] = use_labels
SCREAMING_SNAKE_CASE__ : Dict = vocab_size
SCREAMING_SNAKE_CASE__ : Optional[int] = block_sizes
SCREAMING_SNAKE_CASE__ : int = num_decoder_layers
SCREAMING_SNAKE_CASE__ : Optional[Any] = d_model
SCREAMING_SNAKE_CASE__ : Union[str, Any] = n_head
SCREAMING_SNAKE_CASE__ : Any = d_head
SCREAMING_SNAKE_CASE__ : int = d_inner
SCREAMING_SNAKE_CASE__ : List[str] = hidden_act
SCREAMING_SNAKE_CASE__ : int = hidden_dropout
SCREAMING_SNAKE_CASE__ : Dict = attention_dropout
SCREAMING_SNAKE_CASE__ : Union[str, Any] = activation_dropout
SCREAMING_SNAKE_CASE__ : int = max_position_embeddings
SCREAMING_SNAKE_CASE__ : List[str] = type_vocab_size
SCREAMING_SNAKE_CASE__ : List[Any] = 2
SCREAMING_SNAKE_CASE__ : Optional[int] = num_labels
SCREAMING_SNAKE_CASE__ : Any = num_choices
SCREAMING_SNAKE_CASE__ : Dict = scope
SCREAMING_SNAKE_CASE__ : Optional[int] = initializer_std
# Used in the tests to check the size of the first attention layer
SCREAMING_SNAKE_CASE__ : Any = n_head
# Used in the tests to check the size of the first hidden state
SCREAMING_SNAKE_CASE__ : Tuple = self.d_model
# Used in the tests to check the number of output hidden states/attentions
SCREAMING_SNAKE_CASE__ : Optional[Any] = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
SCREAMING_SNAKE_CASE__ : Optional[int] = self.num_hidden_layers + 2
    def prepare_config_and_inputs(self):
SCREAMING_SNAKE_CASE__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ : Tuple = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE__ : Any = None
SCREAMING_SNAKE_CASE__ : Any = None
SCREAMING_SNAKE_CASE__ : int = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ : int = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
SCREAMING_SNAKE_CASE__ : Optional[int] = TFFunnelModel(config=a )
SCREAMING_SNAKE_CASE__ : Dict = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE__ : Optional[int] = model(a )
SCREAMING_SNAKE_CASE__ : int = [input_ids, input_mask]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
SCREAMING_SNAKE_CASE__ : str = False
SCREAMING_SNAKE_CASE__ : Tuple = TFFunnelModel(config=a )
SCREAMING_SNAKE_CASE__ : Dict = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
SCREAMING_SNAKE_CASE__ : Dict = TFFunnelModel(config=a )
SCREAMING_SNAKE_CASE__ : int = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
    def create_and_check_base_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
SCREAMING_SNAKE_CASE__ : str = TFFunnelBaseModel(config=a )
SCREAMING_SNAKE_CASE__ : Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a )
SCREAMING_SNAKE_CASE__ : Tuple = [input_ids, input_mask]
SCREAMING_SNAKE_CASE__ : Tuple = model(a )
SCREAMING_SNAKE_CASE__ : List[Any] = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
SCREAMING_SNAKE_CASE__ : int = False
SCREAMING_SNAKE_CASE__ : List[str] = TFFunnelBaseModel(config=a )
SCREAMING_SNAKE_CASE__ : Dict = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
SCREAMING_SNAKE_CASE__ : str = False
SCREAMING_SNAKE_CASE__ : Union[str, Any] = TFFunnelBaseModel(config=a )
SCREAMING_SNAKE_CASE__ : List[str] = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
SCREAMING_SNAKE_CASE__ : int = TFFunnelForPreTraining(config=a )
SCREAMING_SNAKE_CASE__ : Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE__ : Tuple = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
SCREAMING_SNAKE_CASE__ : str = TFFunnelForMaskedLM(config=a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE__ : List[str] = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.num_labels
SCREAMING_SNAKE_CASE__ : Any = TFFunnelForSequenceClassification(config=a )
SCREAMING_SNAKE_CASE__ : str = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE__ : int = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
SCREAMING_SNAKE_CASE__ : Any = self.num_choices
SCREAMING_SNAKE_CASE__ : int = TFFunnelForMultipleChoice(config=a )
SCREAMING_SNAKE_CASE__ : List[Any] = tf.tile(tf.expand_dims(a , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE__ : List[str] = tf.tile(tf.expand_dims(a , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE__ : Optional[int] = tf.tile(tf.expand_dims(a , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE__ : str = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
SCREAMING_SNAKE_CASE__ : Any = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
SCREAMING_SNAKE_CASE__ : List[Any] = self.num_labels
SCREAMING_SNAKE_CASE__ : Tuple = TFFunnelForTokenClassification(config=a )
SCREAMING_SNAKE_CASE__ : int = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE__ : List[Any] = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
SCREAMING_SNAKE_CASE__ : List[Any] = TFFunnelForQuestionAnswering(config=a )
SCREAMING_SNAKE_CASE__ : int = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE__ : int = model(a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for the full Funnel architecture."""
    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
            "fill-mask": TFFunnelForMaskedLM,
            "question-answering": TFFunnelForQuestionAnswering,
            "text-classification": TFFunnelForSequenceClassification,
            "token-classification": TFFunnelForTokenClassification,
            "zero-shot": TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    """Common model tests for the base (encoder-only) Funnel variant."""
    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
| 701 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    """Wraps a CLIP image processor and a CLIP tokenizer into a single processor."""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
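# Minimal usage sketch (added; not part of the original module; the image path
# is hypothetical):
#
#     from PIL import Image
#     from transformers import CLIPProcessor
#
#     processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#     inputs = processor(text=["a photo of a cat"], images=Image.open("cat.png"), return_tensors="pt")
#     # inputs now holds input_ids, attention_mask and pixel_values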
| 26 | 0 |
import operator as op
def solve(post_fix):
    '''Evaluate a postfix expression, printing each step in tabular form.'''
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation
    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))
    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")
            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")
            stack.append(
                str(opr[x](int(a), int(b)))
            )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ")
    return int(stack[0])
if __name__ == "__main__":
__lowercase :Tuple = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
print("\n\tResult = ", solve(Postfix))
| 702 |
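# A readable reference version of the postfix evaluator above, without the
# tabular trace output (a sketch of the same algorithm, not a drop-in part
# of this file).
import operator as op

def evaluate_postfix(tokens: list) -> int:
    opr = {"^": op.pow, "*": op.mul, "/": lambda x, y: int(x / y), "+": op.add, "-": op.sub}
    stack = []
    for tok in tokens:
        if tok.isdigit():
            stack.append(int(tok))           # operands go straight onto the stack
        else:
            b, a = stack.pop(), stack.pop()  # right operand is popped first
            stack.append(opr[tok](a, b))     # apply the operator, push the result
    return stack[0]

assert evaluate_postfix("5 6 9 * +".split()) == 59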
import sys
from collections import defaultdict
class _a :
"""simple docstring"""
def __init__( self : Any ) ->Dict:
SCREAMING_SNAKE_CASE__ : Tuple = []
def A_ ( self : int , a : List[str] ) ->Dict:
return self.node_position[vertex]
def A_ ( self : Optional[Any] , a : Any , a : List[str] ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : str = pos
def A_ ( self : List[Any] , a : List[str] , a : Dict , a : Dict , a : List[Any] ) ->Optional[int]:
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
SCREAMING_SNAKE_CASE__ : Dict = 2 * start + 1
else:
SCREAMING_SNAKE_CASE__ : Tuple = 2 * start + 2
if heap[smallest_child] < heap[start]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : int = heap[smallest_child], positions[smallest_child]
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = (
heap[start],
positions[start],
)
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = temp, tempa
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , a )
self.top_to_bottom(a , a , a , a )
def A_ ( self : Union[str, Any] , a : Tuple , a : Tuple , a : Union[str, Any] , a : List[Any] ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] = position[index]
while index != 0:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
SCREAMING_SNAKE_CASE__ : List[Any] = heap[parent]
SCREAMING_SNAKE_CASE__ : str = position[parent]
self.set_position(position[parent] , a )
else:
SCREAMING_SNAKE_CASE__ : int = val
SCREAMING_SNAKE_CASE__ : Optional[Any] = temp
self.set_position(a , a )
break
SCREAMING_SNAKE_CASE__ : Optional[int] = parent
else:
SCREAMING_SNAKE_CASE__ : int = val
SCREAMING_SNAKE_CASE__ : List[str] = temp
self.set_position(a , 0 )
def A_ ( self : Union[str, Any] , a : int , a : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[str] = len(a ) // 2 - 1
for i in range(a , -1 , -1 ):
self.top_to_bottom(a , a , len(a ) , a )
def A_ ( self : Dict , a : List[Any] , a : Dict ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Any = positions[0]
SCREAMING_SNAKE_CASE__ : Optional[int] = sys.maxsize
self.top_to_bottom(a , 0 , len(a ) , a )
return temp
def UpperCAmelCase ( _lowerCamelCase : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = Heap()
SCREAMING_SNAKE_CASE__ : Any = [0] * len(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Any = [-1] * len(_lowerCamelCase ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [] # Heap of Distance of vertices from their neighboring vertex
SCREAMING_SNAKE_CASE__ : str = []
for vertex in range(len(_lowerCamelCase ) ):
distance_tv.append(sys.maxsize )
positions.append(_lowerCamelCase )
heap.node_position.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = []
SCREAMING_SNAKE_CASE__ : int = 1
SCREAMING_SNAKE_CASE__ : int = sys.maxsize
for neighbor, distance in adjacency_list[0]:
SCREAMING_SNAKE_CASE__ : int = 0
SCREAMING_SNAKE_CASE__ : List[str] = distance
heap.heapify(_lowerCamelCase , _lowerCamelCase )
for _ in range(1 , len(_lowerCamelCase ) ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = heap.delete_minimum(_lowerCamelCase , _lowerCamelCase )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(_lowerCamelCase )]
):
SCREAMING_SNAKE_CASE__ : Any = distance
heap.bottom_to_top(
_lowerCamelCase , heap.get_position(_lowerCamelCase ) , _lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : str = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
__lowercase :Union[str, Any] = int(input("Enter number of edges: ").strip())
__lowercase :Dict = defaultdict(list)
for _ in range(edges_number):
__lowercase :Any = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 26 | 0 |
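# The same minimum-spanning-tree construction expressed with the standard
# library's heapq, for comparison (a sketch; the Heap class above implements
# the decrease-key step manually instead).
import heapq
from collections import defaultdict

def prim_mst(adjacency_list, start=0):
    visited = {start}
    candidates = [(w, start, v) for v, w in adjacency_list[start]]
    heapq.heapify(candidates)
    tree_edges = []
    while candidates:
        w, u, v = heapq.heappop(candidates)  # cheapest edge leaving the tree
        if v not in visited:
            visited.add(v)
            tree_edges.append((u, v))
            for nxt, nw in adjacency_list[v]:
                if nxt not in visited:
                    heapq.heappush(candidates, (nw, v, nxt))
    return tree_edges

demo = defaultdict(list)
for u, v, w in [(0, 1, 1), (0, 2, 4), (1, 2, 2)]:
    demo[u].append([v, w])
    demo[v].append([u, w])
print(prim_mst(demo))  # [(0, 1), (1, 2)]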
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase :str = {
"configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :Union[str, Any] = [
"UperNetForSemanticSegmentation",
"UperNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
__lowercase :str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 703 |
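# The import table above defers loading the heavy torch-backed modules until
# first use. A minimal self-contained sketch of the same idea via module-level
# __getattr__ (PEP 562); the mapping below is illustrative only.
import importlib

_LAZY_ATTRS = {"sqrt": "math"}  # attribute name -> module that provides it

def __getattr__(name):  # invoked only when `name` is not found normally
    if name in _LAZY_ATTRS:
        return getattr(importlib.import_module(_LAZY_ATTRS[name]), name)
    raise AttributeError(name)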
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
__lowercase :List[Any] = logging.get_logger(__name__)
__lowercase :Optional[int] = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "longformer"
def __init__( self : List[str] , a : Union[List[int], int] = 5_12 , a : int = 2 , a : int = 1 , a : int = 0 , a : int = 2 , a : int = 3_05_22 , a : int = 7_68 , a : int = 12 , a : int = 12 , a : int = 30_72 , a : str = "gelu" , a : float = 0.1 , a : float = 0.1 , a : int = 5_12 , a : int = 2 , a : float = 0.02 , a : float = 1E-12 , a : bool = False , **a : Dict , ) ->Tuple:
super().__init__(pad_token_id=a , **a )
SCREAMING_SNAKE_CASE__ : int = attention_window
SCREAMING_SNAKE_CASE__ : Any = sep_token_id
SCREAMING_SNAKE_CASE__ : str = bos_token_id
SCREAMING_SNAKE_CASE__ : List[str] = eos_token_id
SCREAMING_SNAKE_CASE__ : List[str] = vocab_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE__ : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE__ : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE__ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : str = type_vocab_size
SCREAMING_SNAKE_CASE__ : Any = initializer_range
SCREAMING_SNAKE_CASE__ : List[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Any = onnx_export
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : int , a : "PretrainedConfig" , a : str = "default" , a : "List[PatchingSpec]" = None ) ->str:
super().__init__(a , a , a )
SCREAMING_SNAKE_CASE__ : Any = True
@property
def A_ ( self : int ) ->Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE__ : int = {0: "batch", 1: "choice", 2: "sequence"}
else:
SCREAMING_SNAKE_CASE__ : str = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("global_attention_mask", dynamic_axis),
] )
@property
def A_ ( self : Optional[Any] ) ->Mapping[str, Mapping[int, str]]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = super().outputs
if self.task == "default":
SCREAMING_SNAKE_CASE__ : List[str] = {0: "batch"}
return outputs
@property
def A_ ( self : str ) ->float:
return 1E-4
@property
def A_ ( self : Any ) ->int:
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14 )
def A_ ( self : str , a : "PreTrainedTokenizerBase" , a : int = -1 , a : int = -1 , a : bool = False , a : Optional[TensorType] = None , ) ->Mapping[str, Any]:
SCREAMING_SNAKE_CASE__ : Tuple = super().generate_dummy_inputs(
preprocessor=a , batch_size=a , seq_length=a , is_pair=a , framework=a )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
SCREAMING_SNAKE_CASE__ : Any = torch.zeros_like(inputs["input_ids"] )
# make every second token global
SCREAMING_SNAKE_CASE__ : str = 1
return inputs
| 26 | 0 |
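# What the dummy-input override above produces: an all-zeros global attention
# mask with every second token marked global. A minimal sketch (assumes torch).
import torch

input_ids = torch.ones(2, 8, dtype=torch.long)       # (batch, sequence)
global_attention_mask = torch.zeros_like(input_ids)  # 0 = local attention only
global_attention_mask[:, ::2] = 1                    # 1 = token attends globally
print(global_attention_mask[0].tolist())  # [1, 0, 1, 0, 1, 0, 1, 0]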
def UpperCAmelCase ( _lowerCamelCase : int ):
'''simple docstring'''
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Tuple = f"""Input value of [number={number}] must be an integer"""
raise TypeError(_lowerCamelCase )
if number < 0:
return False
SCREAMING_SNAKE_CASE__ : Dict = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 704 |
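# A compact equivalent of the digit-by-digit check above: the input passes
# exactly when its square ends with its own digits (an automorphic number).
def is_automorphic(number: int) -> bool:
    if not isinstance(number, int):
        raise TypeError(f"Input value of [number={number}] must be an integer")
    return number >= 0 and str(number * number).endswith(str(number))

assert is_automorphic(76)      # 76**2 == 5776, which ends in 76
assert not is_automorphic(7)   # 7**2 == 49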
def UpperCAmelCase ( _lowerCamelCase : int = 4_000_000 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = [0, 1]
SCREAMING_SNAKE_CASE__ : List[Any] = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
for j in range(len(_lowerCamelCase ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(f"{solution() = }")
| 26 | 0 |
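# The same Project Euler #2 sum without materialising the Fibonacci list
# (a sketch; iterates the (a, b) pair directly).
def even_fib_sum(limit: int = 4_000_000) -> int:
    a, b, total = 1, 2, 0
    while a <= limit:
        if a % 2 == 0:
            total += a
        a, b = b, a + b
    return total

assert even_fib_sum(10) == 10  # 2 + 8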
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase :str = logging.get_logger(__name__)
__lowercase :Optional[int] = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "ibert"
def __init__( self : Optional[Any] , a : Optional[int]=3_05_22 , a : Tuple=7_68 , a : Tuple=12 , a : List[str]=12 , a : str=30_72 , a : str="gelu" , a : Union[str, Any]=0.1 , a : Any=0.1 , a : Tuple=5_12 , a : Dict=2 , a : str=0.02 , a : Union[str, Any]=1E-12 , a : Tuple=1 , a : List[Any]=0 , a : Optional[Any]=2 , a : List[Any]="absolute" , a : str=False , a : Any="none" , **a : Any , ) ->List[Any]:
super().__init__(pad_token_id=a , bos_token_id=a , eos_token_id=a , **a )
SCREAMING_SNAKE_CASE__ : int = vocab_size
SCREAMING_SNAKE_CASE__ : int = hidden_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Any = num_attention_heads
SCREAMING_SNAKE_CASE__ : int = hidden_act
SCREAMING_SNAKE_CASE__ : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE__ : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : int = max_position_embeddings
SCREAMING_SNAKE_CASE__ : List[Any] = type_vocab_size
SCREAMING_SNAKE_CASE__ : List[str] = initializer_range
SCREAMING_SNAKE_CASE__ : List[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE__ : List[Any] = position_embedding_type
SCREAMING_SNAKE_CASE__ : Union[str, Any] = quant_mode
SCREAMING_SNAKE_CASE__ : Union[str, Any] = force_dequant
class _a ( lowercase__ ):
"""simple docstring"""
@property
def A_ ( self : Tuple ) ->Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE__ : str = {0: "batch", 1: "choice", 2: "sequence"}
else:
SCREAMING_SNAKE_CASE__ : List[Any] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 705 |
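# Minimal usage sketch for the configuration class above (assumes the
# `transformers` package; printed values reflect the __init__ defaults).
from transformers import IBertConfig

config = IBertConfig(quant_mode=True)  # switch on integer-only mode
print(config.model_type, config.quant_mode, config.force_dequant)
# -> ibert True none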
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class _a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[int] , a : Any , a : bool = True , a : Dict[str, int] = None , a : int = 32 , a : bool = True , a : Union[int, float] = 1 / 2_55 , a : bool = True , a : bool = True , a : Optional[Union[float, List[float]]] = [0.4814_5466, 0.457_8275, 0.4082_1073] , a : Optional[Union[float, List[float]]] = [0.2686_2954, 0.2613_0258, 0.2757_7711] , a : bool = True , a : Any=7 , a : str=30 , a : Dict=4_00 , a : Optional[int]=3 , ) ->int:
SCREAMING_SNAKE_CASE__ : int = parent
SCREAMING_SNAKE_CASE__ : Dict = do_resize
SCREAMING_SNAKE_CASE__ : List[str] = size if size is not None else {"shortest_edge": 2_88}
SCREAMING_SNAKE_CASE__ : List[Any] = size_divisor
SCREAMING_SNAKE_CASE__ : List[Any] = do_rescale
SCREAMING_SNAKE_CASE__ : Tuple = rescale_factor
SCREAMING_SNAKE_CASE__ : Optional[int] = do_normalize
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[int] = image_mean
SCREAMING_SNAKE_CASE__ : Dict = image_std
SCREAMING_SNAKE_CASE__ : List[str] = do_pad
SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE__ : int = num_channels
SCREAMING_SNAKE_CASE__ : Optional[int] = min_resolution
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max_resolution
def A_ ( self : List[str] ) ->Tuple:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def A_ ( self : int , a : Optional[int] , a : Union[str, Any]=False ) ->Optional[Any]:
if not batched:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.size["shortest_edge"]
SCREAMING_SNAKE_CASE__ : Dict = image_inputs[0]
if isinstance(a , Image.Image ):
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = image.size
else:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[str] = image.shape[1], image.shape[2]
SCREAMING_SNAKE_CASE__ : Any = size / min(a , a )
if h < w:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = size, scale * w
else:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[str] = scale * h, size
SCREAMING_SNAKE_CASE__ : List[Any] = int((13_33 / 8_00) * size )
if max(a , a ) > max_size:
SCREAMING_SNAKE_CASE__ : List[Any] = max_size / max(a , a )
SCREAMING_SNAKE_CASE__ : int = newh * scale
SCREAMING_SNAKE_CASE__ : Optional[int] = neww * scale
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = int(newh + 0.5 ), int(neww + 0.5 )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
SCREAMING_SNAKE_CASE__ : List[Any] = []
for image in image_inputs:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE__ : Tuple = max(a , key=lambda a : item[0] )[0]
SCREAMING_SNAKE_CASE__ : Tuple = max(a , key=lambda a : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = BridgeTowerImageProcessor if is_vision_available() else None
def A_ ( self : List[Any] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Any = BridgeTowerImageProcessingTester(self )
@property
def A_ ( self : Optional[int] ) ->Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def A_ ( self : Tuple ) ->str:
SCREAMING_SNAKE_CASE__ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , "image_mean" ) )
self.assertTrue(hasattr(a , "image_std" ) )
self.assertTrue(hasattr(a , "do_normalize" ) )
self.assertTrue(hasattr(a , "do_resize" ) )
self.assertTrue(hasattr(a , "size" ) )
self.assertTrue(hasattr(a , "size_divisor" ) )
def A_ ( self : List[Any] ) ->List[Any]:
pass
def A_ ( self : Tuple ) ->Optional[Any]:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : int = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : Optional[int] ) ->Any:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Tuple = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : str ) ->Optional[int]:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Any = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 26 | 0 |
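# The expected-size computation the tests above exercise, isolated as a
# sketch: scale the shorter side to `size`, cap the longer side at
# 1333/800 * size, then round both sides down to a multiple of `size_divisor`.
def expected_hw(h: int, w: int, size: int = 288, size_divisor: int = 32):
    scale = size / min(h, w)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    max_size = int((1333 / 800) * size)
    if max(newh, neww) > max_size:
        rescale = max_size / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    newh, neww = int(newh + 0.5), int(neww + 0.5)
    return newh // size_divisor * size_divisor, neww // size_divisor * size_divisor

print(expected_hw(400, 640))  # (288, 448): shorter side hits 288, both divisible by 32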
from __future__ import annotations
class _a :
"""simple docstring"""
def __init__( self : Dict , a : str , a : str ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : Tuple = text, pattern
SCREAMING_SNAKE_CASE__ : Optional[int] = len(a ), len(a )
def A_ ( self : int , a : str ) ->int:
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def A_ ( self : Tuple , a : int ) ->int:
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def A_ ( self : Optional[Any] ) ->list[int]:
# searches pattern in text and returns index positions
SCREAMING_SNAKE_CASE__ : Tuple = []
for i in range(self.textLen - self.patLen + 1 ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.mismatch_in_text(a )
if mismatch_index == -1:
positions.append(a )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.match_in_pattern(self.text[mismatch_index] )
SCREAMING_SNAKE_CASE__ : List[Any] = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
__lowercase :Any = "ABAABA"
__lowercase :Optional[Any] = "AB"
__lowercase :Union[str, Any] = BoyerMooreSearch(text, pattern)
__lowercase :Union[str, Any] = bms.bad_character_heuristic()
if len(positions) == 0:
print("No match found")
else:
print("Pattern found in following positions: ")
print(positions)
| 706 |
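# A compact reference version of the bad-character search above (a sketch:
# same [0, 3] result on the demo input, but it uses the computed shift to
# skip alignments instead of scanning every offset).
def bad_character_search(text: str, pattern: str) -> list:
    n, m = len(text), len(pattern)
    positions, i = [], 0
    while i <= n - m:
        for j in range(m - 1, -1, -1):        # compare right to left
            if pattern[j] != text[i + j]:     # mismatch: bad-character shift
                i += max(1, j - pattern.rfind(text[i + j]))
                break
        else:                                 # full match at alignment i
            positions.append(i)
            i += 1
    return positions

assert bad_character_search("ABAABA", "AB") == [0, 3]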
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : bool = False ):
'''simple docstring'''
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
raise ValueError(
"Warning: upper bound of deterministic test is exceeded. "
"Pass allow_probable=True to allow probabilistic test. "
"A return value of True indicates a probable prime." )
# array bounds provided by analysis
SCREAMING_SNAKE_CASE__ : List[str] = [
2_047,
1_373_653,
25_326_001,
3_215_031_751,
2_152_302_898_747,
3_474_749_660_383,
341_550_071_728_321,
1,
3_825_123_056_546_413_051,
1,
1,
318_665_857_834_031_151_167_461,
3_317_044_064_679_887_385_961_981,
]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(_lowerCamelCase , 1 ):
if n < _p:
# then we have our last prime to check
SCREAMING_SNAKE_CASE__ : Dict = primes[:idx]
break
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
SCREAMING_SNAKE_CASE__ : str = False
for r in range(_lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = pow(_lowerCamelCase , d * 2**r , _lowerCamelCase )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
SCREAMING_SNAKE_CASE__ : str = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def UpperCAmelCase ( ):
'''simple docstring'''
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838_201 )
assert miller_rabin(838_207 )
# 1_373_653
assert not miller_rabin(17_316_001 )
assert miller_rabin(17_316_017 )
# 25_326_001
assert not miller_rabin(3_078_386_641 )
assert miller_rabin(3_078_386_653 )
# 3_215_031_751
assert not miller_rabin(1_713_045_574_801 )
assert miller_rabin(1_713_045_574_819 )
# 2_152_302_898_747
assert not miller_rabin(2_779_799_728_307 )
assert miller_rabin(2_779_799_728_327 )
# 3_474_749_660_383
assert not miller_rabin(113_850_023_909_441 )
assert miller_rabin(113_850_023_909_527 )
# 341_550_071_728_321
assert not miller_rabin(1_275_041_018_848_804_351 )
assert miller_rabin(1_275_041_018_848_804_391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79_666_464_458_507_787_791_867 )
assert miller_rabin(79_666_464_458_507_787_791_951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552_840_677_446_647_897_660_333 )
assert miller_rabin(552_840_677_446_647_897_660_359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 26 | 0 |
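# A compact reference implementation of the deterministic Miller-Rabin test
# above (a sketch; same witness set, valid below ~3.3e24, without the
# probable-prime escape hatch).
def is_prime_mr(n: int) -> bool:
    if n < 2:
        return False
    witnesses = (2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41)
    for p in witnesses:
        if n % p == 0:
            return n == p
    d, s = n - 1, 0
    while d % 2 == 0:            # write n - 1 as d * 2**s with d odd
        d //= 2
        s += 1
    for a in witnesses:
        x = pow(a, d, n)
        if x in (1, n - 1):
            continue
        for _ in range(s - 1):
            x = x * x % n
            if x == n - 1:
                break
        else:
            return False         # a witnesses that n is composite
    return True

assert is_prime_mr(563) and not is_prime_mr(561)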
def UpperCAmelCase ( _lowerCamelCase : list[int] , _lowerCamelCase : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = int(_lowerCamelCase )
# Initialize Result
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
# Traverse through all denomination
for denomination in reversed(_lowerCamelCase ):
# Find denominations
while int(_lowerCamelCase ) >= int(_lowerCamelCase ):
total_value -= int(_lowerCamelCase )
answer.append(_lowerCamelCase ) # Append the "answers" array
return answer
# Driver Code
if __name__ == "__main__":
__lowercase :Dict = []
__lowercase :Tuple = "0"
if (
input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
== "y"
):
__lowercase :str = int(input("Enter the number of denominations you want to add: ").strip())
for i in range(0, n):
denominations.append(int(input(f"Denomination {i}: ").strip()))
__lowercase :str = input("Enter the change you want to make in Indian Currency: ").strip()
else:
# All denominations of Indian Currency if user does not enter
__lowercase :Dict = [1, 2, 5, 10, 20, 50, 100, 500, 2_000]
__lowercase :Tuple = input("Enter the change you want to make: ").strip()
if int(value) == 0 or int(value) < 0:
print("The total value cannot be zero or negative.")
else:
print(f"Following is minimal change for {value}: ")
__lowercase :Dict = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=" ")
| 707 |
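# The greedy step above as a compact sketch: repeatedly take the largest
# denomination that still fits. Optimal for canonical coin systems such as
# the Indian denominations used here, not for arbitrary ones.
def greedy_change(denominations: list, value: int) -> list:
    answer = []
    for coin in sorted(denominations, reverse=True):
        while value >= coin:
            value -= coin
            answer.append(coin)
    return answer

assert greedy_change([1, 2, 5, 10, 20, 50, 100, 500, 2_000], 987) == [
    500, 100, 100, 100, 100, 50, 20, 10, 5, 2
]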
import numpy
class _a :
"""simple docstring"""
def __init__( self : Optional[int] , a : numpy.ndarray , a : numpy.ndarray ) ->None:
SCREAMING_SNAKE_CASE__ : Any = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
SCREAMING_SNAKE_CASE__ : int = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
SCREAMING_SNAKE_CASE__ : Dict = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
SCREAMING_SNAKE_CASE__ : List[Any] = numpy.random.rand(3 , 1 )
# Real output values provided.
SCREAMING_SNAKE_CASE__ : str = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
SCREAMING_SNAKE_CASE__ : Tuple = numpy.zeros(output_array.shape )
def A_ ( self : Union[str, Any] ) ->numpy.ndarray:
SCREAMING_SNAKE_CASE__ : List[Any] = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
SCREAMING_SNAKE_CASE__ : Optional[int] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
SCREAMING_SNAKE_CASE__ : int = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def A_ ( self : int ) ->None:
SCREAMING_SNAKE_CASE__ : Optional[int] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
SCREAMING_SNAKE_CASE__ : int = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def A_ ( self : int , a : numpy.ndarray , a : int , a : bool ) ->None:
for iteration in range(1 , iterations + 1 ):
SCREAMING_SNAKE_CASE__ : Dict = self.feedforward()
self.back_propagation()
if give_loss:
SCREAMING_SNAKE_CASE__ : int = numpy.mean(numpy.square(output - self.feedforward() ) )
print(f"""Iteration {iteration} Loss: {loss}""" )
def A_ ( self : Tuple , a : numpy.ndarray ) ->int:
SCREAMING_SNAKE_CASE__ : Optional[int] = input_arr
SCREAMING_SNAKE_CASE__ : Dict = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
SCREAMING_SNAKE_CASE__ : Any = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def UpperCAmelCase ( _lowerCamelCase : numpy.ndarray ):
'''simple docstring'''
return 1 / (1 + numpy.exp(-value ))
def UpperCAmelCase ( _lowerCamelCase : numpy.ndarray ):
'''simple docstring'''
return (value) * (1 - (value))
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
SCREAMING_SNAKE_CASE__ : Any = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
SCREAMING_SNAKE_CASE__ : List[Any] = TwoHiddenLayerNeuralNetwork(
input_array=_lowerCamelCase , output_array=_lowerCamelCase )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=_lowerCamelCase , iterations=10 , give_loss=_lowerCamelCase )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
| 26 | 0 |
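# The forward pass of the network above in a few lines (a sketch with the
# same 3 -> 4 -> 3 -> 1 weight shapes, sigmoid activations, and the 0.6
# decision threshold used by predict()).
import numpy as np

rng = np.random.default_rng(0)
w1, w2, w3 = rng.random((3, 4)), rng.random((4, 3)), rng.random((3, 1))
sigmoid = lambda v: 1 / (1 + np.exp(-v))

x = np.array([[1, 1, 1]], dtype=np.float64)
out = sigmoid(sigmoid(sigmoid(x @ w1) @ w2) @ w3)  # two hidden layers, one output
print(int(out.item() > 0.6))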
def UpperCAmelCase ( _lowerCamelCase : int = 100 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = n * (n + 1) * (2 * n + 1) / 6
SCREAMING_SNAKE_CASE__ : Dict = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(f"{solution() = }")
| 708 |
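# Sanity check for the closed forms used above (Project Euler #6):
# sum of squares = n(n+1)(2n+1)/6 and square of sum = (n(n+1)/2)**2.
n = 10
brute = sum(range(1, n + 1)) ** 2 - sum(i * i for i in range(1, n + 1))
closed = int((n * (n + 1) / 2) ** 2 - n * (n + 1) * (2 * n + 1) / 6)
assert brute == closed == 2640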
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__lowercase :Tuple = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
__lowercase :str = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
__lowercase :List[Any] = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
"""simple docstring"""
def A_ ( self : List[Any] ) ->MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def A_ ( self : str , a : List[List[List[str]]] , a : List[List[str]] , a : int = 1 , a : int = 4 , ) ->Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=a , hypotheses=a , min_len=a , max_len=a )
}
| 26 | 0 |
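# Direct usage of the underlying NLTK scorer that the metric above wraps
# (a sketch; bypasses the datasets.Metric machinery entirely).
from nltk.translate import gleu_score

hypothesis = ["he", "read", "the", "book"]
reference = ["he", "read", "a", "book"]
score = gleu_score.corpus_gleu(
    list_of_references=[[reference]], hypotheses=[hypothesis], min_len=1, max_len=4
)
print(round(score, 2))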