| column | type | values |
|---|---|---|
| code | string | lengths 81-54k |
| code_codestyle | int64 | 0-721 |
| style_context | string | lengths 91-41.9k |
| style_context_codestyle | int64 | 0-699 |
| label | int64 | 0-1 |
"""simple docstring"""
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length=None,
        context_length=None,
        distribution_output="student_t",
        loss="nll",
        input_size=1,
        lags_sequence=[1, 2, 3, 4, 5, 6, 7],
        scaling=True,
        num_dynamic_real_features=0,
        num_static_categorical_features=0,
        num_static_real_features=0,
        num_time_features=0,
        cardinality=None,
        embedding_dimension=None,
        d_model=64,
        encoder_attention_heads=2,
        decoder_attention_heads=2,
        encoder_layers=2,
        decoder_layers=2,
        encoder_ffn_dim=32,
        decoder_ffn_dim=32,
        activation_function="gelu",
        dropout=0.1,
        encoder_layerdrop=0.1,
        decoder_layerdrop=0.1,
        attention_dropout=0.1,
        activation_dropout=0.1,
        num_parallel_samples=100,
        init_std=0.02,
        use_cache=True,
        is_encoder_decoder=True,
        label_length=10,
        moving_average=25,
        autocorrelation_factor=3,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`")
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`")
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
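# Illustrative usage (added sketch, not part of the original file): with the
# defaults above, `feature_size` is input_size * len(lags_sequence) plus the
# two loc/scale features counted by `_number_of_features`, i.e. 1 * 7 + 2 = 9.
# config = AutoformerConfig(prediction_length=24)
# assert config.feature_size == config.input_size * len(config.lags_sequence) + config._number_of_features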
| 20 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
    ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = BeitConfig(
            vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, )
        return config, pixel_values, labels
    def create_and_check_model(self, config, pixel_values, labels):
        model = FlaxBeitModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels):
        model = FlaxBeitForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = FlaxBeitForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxBeitModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )

    def setUp(self) -> None:
        self.model_tester = FlaxBeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("microsoft/beit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@require_flax
class FlaxBeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None
    @slow
    def test_inference_masked_image_modeling_head(self):
        model = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k")
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="np").pixel_values
        # prepare bool_masked_pos
        bool_masked_pos = np.ones((1, 196), dtype=bool)
        # forward pass
        outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits
        # verify the logits
        expected_shape = (1, 196, 8192)
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = np.array(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]])
        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")
        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = np.array([-1.2385, -1.0987, -1.0108])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))
        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")
        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = (1, 21841)
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = np.array([1.6881, -0.2787, 0.5901])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))
        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
| 20 | 1 |
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LayoutLMvaModel(config=config)
        model.to(torch_device)
        model.eval()
        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size))
        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LayoutLMvaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
        # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
        # the sequence dimension of the text embedding only.
        # (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
        return True

    def setUp(self):
        self.model_tester = LayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=torch_device, )
        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None
    @slow
    def test_inference_no_head(self):
        model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)
        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)
        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device), bbox=bbox.to(torch_device), pixel_values=pixel_values.to(torch_device), )
        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 20 |
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
    def __init__(self) -> None:
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        if 2 * start + 2 >= size:
            smallest_child = 2 * start + 1
        elif heap[2 * start + 1] < heap[2 * start + 2]:
            smallest_child = 2 * start + 1
        else:
            smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            temp, tempa = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, tempa
            temp = self.get_position(positions[smallest_child])
            self.set_position(
                positions[smallest_child], self.get_position(positions[start]))
            self.set_position(positions[start], temp)
            self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    """simple docstring"""
    heap = Heap()
    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)
    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
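# Worked example (added sketch, not part of the original file): for the weighted
# triangle 0-1 (w=1), 1-2 (w=2), 0-2 (w=3), the minimum spanning tree is
# [(0, 1), (1, 2)]:
# example_graph = defaultdict(list)
# for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 3)]:
#     example_graph[u].append([v, w])
#     example_graph[v].append([u, w])
# print(prisms_algorithm(example_graph))  # -> [(0, 1), (1, 2)]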
| 20 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class LayoutLMvaConfig(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMvaOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs is different for question answering and sequence classification
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        setattr(processor.image_processor, "apply_ocr", False)
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size
        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(
            processor(
                dummy_image, text=dummy_text, boxes=dummy_bboxes, return_tensors=framework, ))
        return inputs
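# Usage sketch (added; the processor checkpoint and task are illustrative
# assumptions, not taken from this file):
# from transformers import LayoutLMv3Processor
# processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
# onnx_config = LayoutLMvaOnnxConfig(LayoutLMvaConfig(), task="question-answering")
# dummy_inputs = onnx_config.generate_dummy_inputs(processor, framework=TensorType.PYTORCH)
# list(dummy_inputs) then matches the keys declared by the `inputs` property.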
| 20 |
"""simple docstring"""
from copy import deepcopy
class FenwickTree:
    def __init__(self, arr=None, size=None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr) -> None:
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index) -> int:
        return index - (index & (-index))

    def add(self, index, value) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index, value) -> None:
        self.add(index, value - self.get(index))

    def prefix(self, right) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left, right) -> int:
        return self.prefix(right) - self.prefix(left)

    def get(self, index) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
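# Minimal usage sketch (added; not part of the original file):
# f = FenwickTree(size=5)
# f.add(0, 3)
# f.add(2, 4)
# f.prefix(3)    # -> 7, the sum of indices 0..2
# f.query(1, 3)  # -> 4, the sum of indices 1..2
# f.get_array()  # -> [3, 0, 4, 0, 0], the plain array recovered from the tree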
| 20 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_mobilenet_v2": [
        "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileNetV2Config",
        "MobileNetV2OnnxConfig",
    ],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
        "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileNetV2ForImageClassification",
        "MobileNetV2ForSemanticSegmentation",
        "MobileNetV2Model",
        "MobileNetV2PreTrainedModel",
        "load_tf_weights_in_mobilenet_v2",
    ]
if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 20 |
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""" )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        n_identifier: Union[str, List[str], None] = None,
        ignore_files: Union[List[str], None] = None,
        only_modules: bool = True,
    ):
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier, List):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print("Testing", file)
            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)
    def test_modeling_examples(self):
        transformers_directory = Path("src/transformers")
        files = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path("src/transformers")
        files = "tokenization"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self):
        transformers_directory = Path("src/transformers")
        files = "configuration"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_remaining_examples(self):
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
| 20 | 1 |
"""simple docstring"""
def binary_insertion_sort(collection: list) -> list:
    """simple docstring"""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
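# Illustrative doctest-style checks (added; not part of the original file):
# >>> binary_insertion_sort([0, 4, 1234, 4, 1])
# [0, 1, 4, 4, 1234]
# >>> binary_insertion_sort([-1, -2, -3])
# [-3, -2, -1]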
| 20 |
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
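# Usage sketch (added; illustrative only):
# onnx_config = SegformerOnnxConfig(SegformerConfig())
# onnx_config.inputs declares a single "pixel_values" input with dynamic batch,
# channel, and spatial axes; atol_for_validation (1e-4) is the tolerance used
# when the exported ONNX graph is validated against the PyTorch model.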
| 20 | 1 |
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    """simple docstring"""
    d = Decimal
    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1]))
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")
        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            ))
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")
        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )
        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]
        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)
        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
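# Worked 2x2 example (added; not part of the original file):
# >>> inverse_of_matrix([[2.0, 5.0], [2.0, 0.0]])
# [[0.0, 0.5], [0.2, -0.2]]
# (determinant = 2*0 - 2*5 = -10, so the inverse is the swapped/negated matrix
# divided by -10.)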
| 20 |
"""simple docstring"""
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    """simple docstring"""
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models")
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir")
        download_parser.add_argument(
            "--trust-remote-code", action="store_true", help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine", )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
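# Example invocation (added; the model name is illustrative):
#   transformers-cli download --cache-dir /tmp/models bert-base-uncased
# This dispatches to the `download` subcommand registered above and pre-fetches
# both the model weights and the tokenizer into the given cache directory.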
| 20 | 1 |
"""simple docstring"""
def z_function(input_str: str) -> list[int]:
    """simple docstring"""
    z_result = [0 for i in range(len(input_str))]
    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0
    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge
        while go_next(i, z_result, input_str):
            z_result[i] += 1
        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    """simple docstring"""
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    """simple docstring"""
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)
    for val in z_result:
        # if value is greater than the length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern):
            answer += 1
    return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
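# Illustrative checks (added; not part of the original file):
# >>> z_function("abracadabra")
# [0, 0, 0, 1, 0, 1, 0, 4, 0, 0, 1]
# >>> find_pattern("abr", "abracadabra")
# 2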
| 20 |
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return DebertaVaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, pos_att_type=self.pos_att_type, )

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]
        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaVaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaVaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common( self ) -> Union[str, Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( _a , _a , unittest.TestCase ):
a : Any =(
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
a : Dict =(
{
"""feature-extraction""": DebertaVaModel,
"""fill-mask""": DebertaVaForMaskedLM,
"""question-answering""": DebertaVaForQuestionAnswering,
"""text-classification""": DebertaVaForSequenceClassification,
"""token-classification""": DebertaVaForTokenClassification,
"""zero-shot""": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
a : Tuple =True
a : Union[str, Any] =False
a : Tuple =False
a : Union[str, Any] =False
a : Dict =False
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
UpperCamelCase__ = DebertaVaModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=snake_case_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*snake_case_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ = DebertaVaModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( unittest.TestCase ):
@unittest.skip(reason='Model not available yet' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
pass
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
UpperCamelCase__ = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge' )
UpperCamelCase__ = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
UpperCamelCase__ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ )[0]
# compare the actual values for a slice.
UpperCamelCase__ = torch.tensor(
[[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , snake_case_ , atol=1E-4 ) , F'{output[:, 1:4, 1:4]}' )
| 20 | 1 |
"""simple docstring"""
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(ksize , sigma , theta , lambd , gamma , psi ) -> np.ndarray:
    """simple docstring"""
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize) , dtype=np.float64 )
    # each value
    for y in range(ksize ):
        for x in range(ksize ):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degree to radiant
            _theta = theta / 1_80 * np.pi
            cos_theta = np.cos(_theta )
            sin_theta = np.sin(_theta )
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
    return gabor
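# Minimal usage sketch (illustrative, not part of the original script): the
# parameter order (ksize, sigma, theta, lambd, gamma, psi) matches the call in
# the main block below; with psi == 0 the kernel peaks at 1.0 in its centre.
# >>> kernel = gabor_filter_kernel(9, 4, 0, 10, 0.5, 0)
# >>> kernel.shape
# (9, 9)
# >>> float(kernel[4, 4])
# 1.0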
if __name__ == "__main__":
import doctest
doctest.testmod()
    # read original image
    img = imread("""../image_data/lena.jpg""")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 1_20, 1_50]:
        kernel_aa = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_aa)
    out = out / out.max() * 2_55
    out = out.astype(np.uint8)
imshow("""Original""", gray)
imshow("""Gabor filter with 20x20 mask and 6 directions""", out)
waitKey(0)
| 20 |
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name ) -> SwinConfig:
"""simple docstring"""
    config = SwinConfig()
    name_split = swin_name.split('_' )
    model_size = name_split[1]
    img_size = int(name_split[4] )
    window_size = int(name_split[3][-1] )
    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 1_28
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 1_92
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    if "in22k" in swin_name:
        num_classes = 2_18_41
    else:
        num_classes = 10_00
        repo_id = 'huggingface/label-files'
        filename = 'imagenet-1k-id2label.json'
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size
return config
def rename_key(name ) -> str:
"""simple docstring"""
if "patch_embed.proj" in name:
UpperCamelCase__ = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
UpperCamelCase__ = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
UpperCamelCase__ = 'encoder.' + name
if "attn.proj" in name:
UpperCamelCase__ = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
UpperCamelCase__ = name.replace('attn' , 'attention.self' )
if "norm1" in name:
UpperCamelCase__ = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
UpperCamelCase__ = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
UpperCamelCase__ = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
UpperCamelCase__ = name.replace('mlp.fc2' , 'output.dense' )
if name == "norm.weight":
UpperCamelCase__ = 'layernorm.weight'
if name == "norm.bias":
UpperCamelCase__ = 'layernorm.bias'
if "head" in name:
UpperCamelCase__ = name.replace('head' , 'classifier' )
else:
UpperCamelCase__ = 'swin.' + name
return name
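# Illustrative key mappings produced by the renaming above (timm -> HF):
#   "patch_embed.proj.weight" -> "swin.embeddings.patch_embeddings.projection.weight"
#   "head.weight"             -> "classifier.weight"  (the head gets no "swin." prefix)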
def convert_state_dict(orig_state_dict , model ) -> dict:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
if "mask" in key:
continue
elif "qkv" in key:
            key_split = key.split('.' )
            layer_num = int(key_split[1] )
            block_num = int(key_split[3] )
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
UpperCamelCase__ = val[:dim, :]
UpperCamelCase__ = val[
dim : dim * 2, :
]
UpperCamelCase__ = val[-dim:, :]
else:
UpperCamelCase__ = val[
:dim
]
UpperCamelCase__ = val[
dim : dim * 2
]
UpperCamelCase__ = val[
-dim:
]
else:
UpperCamelCase__ = val
return orig_state_dict
def convert_swin_checkpoint(swin_name , pytorch_dump_folder_path ) -> None:
    """simple docstring"""
    timm_model = timm.create_model(swin_name , pretrained=True )
    timm_model.eval()
    config = get_swin_config(swin_name )
    model = SwinForImageClassification(config )
    model.eval()
    new_state_dict = convert_state_dict(timm_model.state_dict() , model )
    model.load_state_dict(new_state_dict )
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image_processor = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-' ) ) )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = image_processor(images=image , return_tensors='pt' )
    timm_outs = timm_model(inputs['pixel_values'] )
    hf_outs = model(**inputs ).logits
    assert torch.allclose(timm_outs , hf_outs , atol=1E-3 )
    print(F'Saving model {swin_name} to {pytorch_dump_folder_path}' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
A__ : Optional[Any]= argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
A__ : Tuple= parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
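# Example run (the script filename is hypothetical; requires timm, torch and
# network access for the checkpoint and label files):
#   python convert_swin_timm_to_pytorch.py \
#       --swin_name swin_tiny_patch4_window7_224 \
#       --pytorch_dump_folder_path ./swin-tiny-patch4-window7-224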
| 20 | 1 |
"""simple docstring"""
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args ):
"""simple docstring"""
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class __lowerCamelCase ( _a ):
@staticmethod
    def register_subcommand(parser ) -> None:
        download_parser = parser.add_parser('download' )
download_parser.add_argument(
'--cache-dir' , type=snake_case_ , default=snake_case_ , help='Path to location to store the models' )
download_parser.add_argument(
'--force' , action='store_true' , help='Force the model to be download even if already in cache-dir' )
download_parser.add_argument(
'--trust-remote-code' , action='store_true' , help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' , )
download_parser.add_argument('model' , type=snake_case_ , help='Name of the model to download' )
        download_parser.set_defaults(func=download_command_factory )
    def __init__( self , model , cache , force , trust_remote_code ) -> None:
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code
    def run( self ) -> None:
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
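# Example invocation (assuming the standard `transformers-cli` entry point
# that registers this subcommand):
#   transformers-cli download bert-base-uncased --cache-dir /tmp/models --force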
| 20 |
"""simple docstring"""
def is_pentagonal(n ) -> bool:
    """simple docstring"""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0
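# Sanity check (illustrative): the pentagonal numbers P(k) = k(3k - 1) / 2
# start 1, 5, 12, 22, 35, and is_pentagonal accepts exactly those.
# >>> [n for n in range(1, 40) if is_pentagonal(n)]
# [1, 5, 12, 22, 35]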
def solution(limit = 50_00 ) -> int:
    """simple docstring"""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1 , limit )]
    for i, pentagonal_i in enumerate(pentagonal_nums ):
        for j in range(i , len(pentagonal_nums ) ):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a ) and is_pentagonal(b ):
                return b
    return -1
if __name__ == "__main__":
print(F"""{solution() = }""")
| 20 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : List[Any]= logging.get_logger(__name__)
A__ : Any= {
"""vinvino02/glpn-kitti""": """https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json""",
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class __lowerCamelCase ( _a ):
a : int ="""glpn"""
    def __init__( self , num_channels=3 , num_encoder_blocks=4 , depths=[2, 2, 2, 2] , sr_ratios=[8, 4, 2, 1] , hidden_sizes=[32, 64, 160, 256] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , num_attention_heads=[1, 2, 5, 8] , mlp_ratios=[4, 4, 4, 4] , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , drop_path_rate=0.1 , layer_norm_eps=1E-6 , decoder_hidden_size=64 , max_depth=10 , head_in_index=-1 , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
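# Note: upstream this class is `GLPNConfig`. With the defaults above it
# describes a 4-block encoder with hidden sizes [32, 64, 160, 256], depths
# [2, 2, 2, 2] and a 64-dim decoder head.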
| 20 |
"""simple docstring"""
def solution(limit = 50_00_00_00 ) -> int:
    """simple docstring"""
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2) )
    primes = set(range(3 , prime_square_limit + 1 , 2 ) )
    primes.add(2 )
    for p in range(3 , prime_square_limit + 1 , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , prime_square_limit + 1 , p ) ) )
    for prime1 in primes:
        square = prime1 * prime1
        for prime2 in primes:
            cube = prime2 * prime2 * prime2
            if square + cube >= limit - 16:
                break
            for prime3 in primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total )
    return len(ret )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 20 | 1 |
"""simple docstring"""
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def prime_generator() -> Iterator[int]:
    """simple docstring"""
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1
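# Minimal usage sketch: pull the first few primes lazily.
# >>> from itertools import islice
# >>> list(islice(prime_generator(), 5))
# [2, 3, 5, 7, 11]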
def solution(n = 2_00_00_00 ) -> int:
    """simple docstring"""
    return sum(takewhile(lambda x : x < n , prime_generator() ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 20 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
A__ : List[Any]= ["""bert-base-uncased""", """bert-base-cased"""]
A__ : Optional[int]= """hf-internal-testing/tiny-bert-tf-only"""
if is_tf_available():
class __lowerCamelCase ( tf.keras.Model ):
def __init__( self , snake_case_ ) -> Optional[int]:
super().__init__()
UpperCamelCase__ = tokenizer
UpperCamelCase__ = AutoConfig.from_pretrained(snake_case_ )
UpperCamelCase__ = TFAutoModel.from_config(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> int:
UpperCamelCase__ = self.tokenizer(snake_case_ )
UpperCamelCase__ = self.bert(**snake_case_ )
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class __lowerCamelCase ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
super().setUp()
UpperCamelCase__ = [
BertTokenizer.from_pretrained(snake_case_ ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
UpperCamelCase__ = [TFBertTokenizer.from_pretrained(snake_case_ ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(snake_case_ , use_fast_bert_tokenizer=snake_case_ )
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
UpperCamelCase__ = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
UpperCamelCase__ = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in (self.test_sentences, self.paired_sentences):
UpperCamelCase__ = tokenizer(snake_case_ , return_tensors='tf' , padding='longest' )
UpperCamelCase__ = tf_tokenizer(snake_case_ )
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.int64 ) == tf_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
for tf_tokenizer in self.tf_tokenizers:
UpperCamelCase__ = tf_tokenizer(self.paired_sentences )
UpperCamelCase__ = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.int64 ) == separated_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
for tf_tokenizer in self.tf_tokenizers:
UpperCamelCase__ = tf.function(snake_case_ )
for test_inputs in (self.test_sentences, self.paired_sentences):
UpperCamelCase__ = tf.constant(snake_case_ )
UpperCamelCase__ = compiled_tokenizer(snake_case_ )
UpperCamelCase__ = tf_tokenizer(snake_case_ )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
for tf_tokenizer in self.tf_tokenizers:
UpperCamelCase__ = ModelToSave(tokenizer=snake_case_ )
UpperCamelCase__ = tf.convert_to_tensor(self.test_sentences )
UpperCamelCase__ = model(snake_case_ ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
UpperCamelCase__ = Path(snake_case_ ) / 'saved.model'
model.save(snake_case_ )
UpperCamelCase__ = tf.keras.models.load_model(snake_case_ )
UpperCamelCase__ = loaded_model(snake_case_ )
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1E-5 )
| 20 | 1 |
"""simple docstring"""
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
A__ : List[str]= """\
@inproceedings{lin-2004-rouge,
title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",
author = \"Lin, Chin-Yew\",
booktitle = \"Text Summarization Branches Out\",
month = jul,
year = \"2004\",
address = \"Barcelona, Spain\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W04-1013\",
pages = \"74--81\",
}
"""
A__ : Tuple= """\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metrics is a wrapper around Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
"""
A__ : List[Any]= """
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,
`\"rougeL\"`: Longest common subsequence based scoring.
`\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric('rouge')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
>>> print(results[\"rouge1\"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results[\"rouge1\"].mid.fmeasure)
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
    def _compute( self , predictions , references , rouge_types=None , use_aggregator=True , use_stemmer=False ) -> Optional[Any]:
        if rouge_types is None:
            rouge_types = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types , use_stemmer=use_stemmer )
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []
        for ref, pred in zip(references , predictions ):
            score = scorer.score(ref , pred )
            if use_aggregator:
                aggregator.add_scores(score )
            else:
                scores.append(score )
        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]
        return result
| 20 |
"""simple docstring"""
def exchange_sort(numbers ) -> list[int]:
    """simple docstring"""
    numbers_length = len(numbers )
    for i in range(numbers_length ):
        for j in range(i + 1 , numbers_length ):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
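# Exchange sort makes O(n^2) comparisons and sorts the list in place.
# >>> exchange_sort([5, 4, 3, 2, 1])
# [1, 2, 3, 4, 5]
# >>> exchange_sort([-1, 0, 0, 7])
# [-1, 0, 0, 7]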
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
print(exchange_sort(unsorted))
| 20 | 1 |
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
A__ : List[Any]= logging.get_logger(__name__)
A__ : Tuple= OrderedDict(
[
("""align""", """EfficientNetImageProcessor"""),
("""beit""", """BeitImageProcessor"""),
("""bit""", """BitImageProcessor"""),
("""blip""", """BlipImageProcessor"""),
("""blip-2""", """BlipImageProcessor"""),
("""bridgetower""", """BridgeTowerImageProcessor"""),
("""chinese_clip""", """ChineseCLIPImageProcessor"""),
("""clip""", """CLIPImageProcessor"""),
("""clipseg""", """ViTImageProcessor"""),
("""conditional_detr""", """ConditionalDetrImageProcessor"""),
("""convnext""", """ConvNextImageProcessor"""),
("""convnextv2""", """ConvNextImageProcessor"""),
("""cvt""", """ConvNextImageProcessor"""),
("""data2vec-vision""", """BeitImageProcessor"""),
("""deformable_detr""", """DeformableDetrImageProcessor"""),
("""deit""", """DeiTImageProcessor"""),
("""deta""", """DetaImageProcessor"""),
("""detr""", """DetrImageProcessor"""),
("""dinat""", """ViTImageProcessor"""),
("""donut-swin""", """DonutImageProcessor"""),
("""dpt""", """DPTImageProcessor"""),
("""efficientformer""", """EfficientFormerImageProcessor"""),
("""efficientnet""", """EfficientNetImageProcessor"""),
("""flava""", """FlavaImageProcessor"""),
("""focalnet""", """BitImageProcessor"""),
("""git""", """CLIPImageProcessor"""),
("""glpn""", """GLPNImageProcessor"""),
("""groupvit""", """CLIPImageProcessor"""),
("""imagegpt""", """ImageGPTImageProcessor"""),
("""instructblip""", """BlipImageProcessor"""),
("""layoutlmv2""", """LayoutLMv2ImageProcessor"""),
("""layoutlmv3""", """LayoutLMv3ImageProcessor"""),
("""levit""", """LevitImageProcessor"""),
("""mask2former""", """Mask2FormerImageProcessor"""),
("""maskformer""", """MaskFormerImageProcessor"""),
("""mgp-str""", """ViTImageProcessor"""),
("""mobilenet_v1""", """MobileNetV1ImageProcessor"""),
("""mobilenet_v2""", """MobileNetV2ImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevitv2""", """MobileViTImageProcessor"""),
("""nat""", """ViTImageProcessor"""),
("""oneformer""", """OneFormerImageProcessor"""),
("""owlvit""", """OwlViTImageProcessor"""),
("""perceiver""", """PerceiverImageProcessor"""),
("""pix2struct""", """Pix2StructImageProcessor"""),
("""poolformer""", """PoolFormerImageProcessor"""),
("""regnet""", """ConvNextImageProcessor"""),
("""resnet""", """ConvNextImageProcessor"""),
("""sam""", """SamImageProcessor"""),
("""segformer""", """SegformerImageProcessor"""),
("""swiftformer""", """ViTImageProcessor"""),
("""swin""", """ViTImageProcessor"""),
("""swin2sr""", """Swin2SRImageProcessor"""),
("""swinv2""", """ViTImageProcessor"""),
("""table-transformer""", """DetrImageProcessor"""),
("""timesformer""", """VideoMAEImageProcessor"""),
("""tvlt""", """TvltImageProcessor"""),
("""upernet""", """SegformerImageProcessor"""),
("""van""", """ConvNextImageProcessor"""),
("""videomae""", """VideoMAEImageProcessor"""),
("""vilt""", """ViltImageProcessor"""),
("""vit""", """ViTImageProcessor"""),
("""vit_hybrid""", """ViTHybridImageProcessor"""),
("""vit_mae""", """ViTImageProcessor"""),
("""vit_msn""", """ViTImageProcessor"""),
("""xclip""", """CLIPImageProcessor"""),
("""yolos""", """YolosImageProcessor"""),
]
)
A__ : Optional[Any]= _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name ):
"""simple docstring"""
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
UpperCamelCase__ = model_type_to_module_name(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = importlib.import_module(F'.{module_name}' , 'transformers.models' )
try:
return getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(SCREAMING_SNAKE_CASE , '__name__' , SCREAMING_SNAKE_CASE ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
UpperCamelCase__ = importlib.import_module('transformers' )
if hasattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return None
def get_image_processor_config( pretrained_model_name_or_path , cache_dir = None , force_download = False , resume_download = False , proxies = None , use_auth_token = None , revision = None , local_files_only = False , **kwargs , ) -> Dict:
    """simple docstring"""
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path , IMAGE_PROCESSOR_NAME , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
if resolved_config_file is None:
logger.info(
'Could not locate the image processor configuration file, will try to use the model config instead.' )
return {}
with open(SCREAMING_SNAKE_CASE , encoding='utf-8' ) as reader:
return json.load(SCREAMING_SNAKE_CASE )
class __lowerCamelCase :
def __init__( self ) -> Any:
raise EnvironmentError(
'AutoImageProcessor is designed to be instantiated '
'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.' )
@classmethod
@replace_list_option_in_docstrings(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( cls , snake_case_ , **snake_case_ ) -> Optional[int]:
UpperCamelCase__ = kwargs.pop('config' , snake_case_ )
UpperCamelCase__ = kwargs.pop('trust_remote_code' , snake_case_ )
UpperCamelCase__ = True
UpperCamelCase__ , UpperCamelCase__ = ImageProcessingMixin.get_image_processor_dict(snake_case_ , **snake_case_ )
UpperCamelCase__ = config_dict.get('image_processor_type' , snake_case_ )
UpperCamelCase__ = None
if "AutoImageProcessor" in config_dict.get('auto_map' , {} ):
UpperCamelCase__ = config_dict['auto_map']['AutoImageProcessor']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
UpperCamelCase__ = config_dict.pop('feature_extractor_type' , snake_case_ )
if feature_extractor_class is not None:
logger.warning(
'Could not find image processor class in the image processor config or the model config. Loading'
' based on pattern matching with the model\'s feature extractor configuration.' )
UpperCamelCase__ = feature_extractor_class.replace('FeatureExtractor' , 'ImageProcessor' )
if "AutoFeatureExtractor" in config_dict.get('auto_map' , {} ):
UpperCamelCase__ = config_dict['auto_map']['AutoFeatureExtractor']
UpperCamelCase__ = feature_extractor_auto_map.replace('FeatureExtractor' , 'ImageProcessor' )
logger.warning(
'Could not find image processor auto map in the image processor config or the model config.'
' Loading based on pattern matching with the model\'s feature extractor configuration.' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(snake_case_ , snake_case_ ):
UpperCamelCase__ = AutoConfig.from_pretrained(snake_case_ , **snake_case_ )
# It could be in `config.image_processor_type``
UpperCamelCase__ = getattr(snake_case_ , 'image_processor_type' , snake_case_ )
if hasattr(snake_case_ , 'auto_map' ) and "AutoImageProcessor" in config.auto_map:
UpperCamelCase__ = config.auto_map['AutoImageProcessor']
if image_processor_class is not None:
UpperCamelCase__ = image_processor_class_from_name(snake_case_ )
UpperCamelCase__ = image_processor_auto_map is not None
UpperCamelCase__ = image_processor_class is not None or type(snake_case_ ) in IMAGE_PROCESSOR_MAPPING
UpperCamelCase__ = resolve_trust_remote_code(
snake_case_ , snake_case_ , snake_case_ , snake_case_ )
if has_remote_code and trust_remote_code:
UpperCamelCase__ = get_class_from_dynamic_module(
snake_case_ , snake_case_ , **snake_case_ )
UpperCamelCase__ = kwargs.pop('code_revision' , snake_case_ )
if os.path.isdir(snake_case_ ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(snake_case_ , **snake_case_ )
elif image_processor_class is not None:
return image_processor_class.from_dict(snake_case_ , **snake_case_ )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(snake_case_ ) in IMAGE_PROCESSOR_MAPPING:
UpperCamelCase__ = IMAGE_PROCESSOR_MAPPING[type(snake_case_ )]
return image_processor_class.from_dict(snake_case_ , **snake_case_ )
raise ValueError(
F'Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '
F'`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '
F'`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}' )
@staticmethod
    def register(config_class , image_processor_class ) -> Any:
        IMAGE_PROCESSOR_MAPPING.register(config_class , image_processor_class )
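# Typical usage (mirrors the public `AutoImageProcessor` API this module
# implements; the checkpoint name is just an example):
# >>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
# >>> # or pair a custom config with a custom processor class:
# >>> AutoImageProcessor.register(CustomConfig, CustomImageProcessor)  # hypothetical classes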
| 20 |
"""simple docstring"""
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
A__ : str= {
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
A__ : str= {
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def pt_to_pil(images ):
    """simple docstring"""
    images = (images / 2 + 0.5).clamp(0 , 1 )
    images = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
    images = numpy_to_pil(images )
    return images
def numpy_to_pil(images ):
    """simple docstring"""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 2_55).round().astype('uint8' )
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze() , mode='L' ) for image in images]
    else:
        pil_images = [Image.fromarray(image ) for image in images]
    return pil_images
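# Minimal usage sketch (hypothetical input): a zero array becomes a black
# 64x64 RGB image.
# >>> import numpy as np
# >>> numpy_to_pil(np.zeros((64, 64, 3)))[0].size
# (64, 64)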
| 20 | 1 |
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __lowerCamelCase :
def __init__( self , snake_case_ , snake_case_=13 , snake_case_=30 , snake_case_=2 , snake_case_=3 , snake_case_=True , snake_case_=True , snake_case_=32 , snake_case_=2 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=10 , snake_case_=0.02 , snake_case_=3 , snake_case_=0.6 , snake_case_=None , ) -> Any:
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = image_size
UpperCamelCase__ = patch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = is_training
UpperCamelCase__ = use_labels
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = type_sequence_label_size
UpperCamelCase__ = initializer_range
UpperCamelCase__ = mask_ratio
UpperCamelCase__ = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCamelCase__ = (image_size // patch_size) ** 2
UpperCamelCase__ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> int:
UpperCamelCase__ = TFViTMAEModel(config=snake_case_ )
UpperCamelCase__ = model(snake_case_ , training=snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> str:
UpperCamelCase__ = TFViTMAEForPreTraining(snake_case_ )
UpperCamelCase__ = model(snake_case_ , training=snake_case_ )
# expected sequence length = num_patches
UpperCamelCase__ = (self.image_size // self.patch_size) ** 2
UpperCamelCase__ = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
UpperCamelCase__ = 1
UpperCamelCase__ = TFViTMAEForPreTraining(snake_case_ )
UpperCamelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase__ = model(snake_case_ , training=snake_case_ )
UpperCamelCase__ = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.prepare_config_and_inputs()
((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) = config_and_inputs
UpperCamelCase__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class __lowerCamelCase ( _a , _a , unittest.TestCase ):
a : int =(TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
a : List[str] ={"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {}
a : List[Any] =False
a : str =False
a : Dict =False
a : Dict =False
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = TFViTMAEModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
pass
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(snake_case_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCamelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case_ , tf.keras.layers.Layer ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ = [*signature.parameters.keys()]
UpperCamelCase__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
# make the mask reproducible
np.random.seed(2 )
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = int((config.image_size // config.patch_size) ** 2 )
UpperCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = self._prepare_for_class(snake_case_ , snake_case_ )
UpperCamelCase__ = model(snake_case_ , noise=snake_case_ )
UpperCamelCase__ = copy.deepcopy(self._prepare_for_class(snake_case_ , snake_case_ ) )
UpperCamelCase__ = model(**snake_case_ , noise=snake_case_ )
UpperCamelCase__ = outputs_dict[0].numpy()
UpperCamelCase__ = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
# make the mask reproducible
np.random.seed(2 )
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = int((config.image_size // config.patch_size) ** 2 )
UpperCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(snake_case_ ):
UpperCamelCase__ = {}
for k, v in inputs_dict.items():
if tf.is_tensor(snake_case_ ):
UpperCamelCase__ = v.numpy()
else:
UpperCamelCase__ = np.array(snake_case_ )
return inputs_np_dict
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = self._prepare_for_class(snake_case_ , snake_case_ )
UpperCamelCase__ = prepare_numpy_arrays(snake_case_ )
UpperCamelCase__ = model(snake_case_ , noise=snake_case_ )
UpperCamelCase__ = model(**snake_case_ , noise=snake_case_ )
self.assert_outputs_same(snake_case_ , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> Any:
# make masks reproducible
np.random.seed(2 )
UpperCamelCase__ = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
UpperCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCamelCase__ = tf.constant(snake_case_ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCamelCase__ = tf_noise
super().check_pt_tf_models(snake_case_ , snake_case_ , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
# make mask reproducible
np.random.seed(2 )
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(snake_case_ )
if module_member_name.endswith('MainLayer' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('MainLayer' )] == model_class.__name__[: -len('Model' )]
for module_member in (getattr(snake_case_ , snake_case_ ),)
if isinstance(snake_case_ , snake_case_ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(snake_case_ , '_keras_serializable' , snake_case_ )
}
UpperCamelCase__ = int((config.image_size // config.patch_size) ** 2 )
UpperCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCamelCase__ = tf.convert_to_tensor(snake_case_ )
inputs_dict.update({'noise': noise} )
for main_layer_class in tf_main_layer_classes:
UpperCamelCase__ = main_layer_class(snake_case_ )
UpperCamelCase__ = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
UpperCamelCase__ = tf.keras.Model(snake_case_ , outputs=main_layer(snake_case_ ) )
UpperCamelCase__ = model(snake_case_ )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase__ = os.path.join(snake_case_ , 'keras_model.h5' )
model.save(snake_case_ )
UpperCamelCase__ = tf.keras.models.load_model(
snake_case_ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(snake_case_ , tf.keras.Model )
UpperCamelCase__ = model(snake_case_ )
self.assert_outputs_same(snake_case_ , snake_case_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
# make mask reproducible
np.random.seed(2 )
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = int((config.image_size // config.patch_size) ** 2 )
UpperCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = self._prepare_for_class(snake_case_ , snake_case_ )
UpperCamelCase__ = model(snake_case_ , noise=snake_case_ )
if model_class.__name__ == "TFViTMAEModel":
UpperCamelCase__ = outputs.last_hidden_state.numpy()
UpperCamelCase__ = 0
else:
UpperCamelCase__ = outputs.logits.numpy()
UpperCamelCase__ = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case_ , saved_model=snake_case_ )
UpperCamelCase__ = model_class.from_pretrained(snake_case_ )
UpperCamelCase__ = model(snake_case_ , noise=snake_case_ )
if model_class.__name__ == "TFViTMAEModel":
UpperCamelCase__ = after_outputs['last_hidden_state'].numpy()
UpperCamelCase__ = 0
else:
UpperCamelCase__ = after_outputs['logits'].numpy()
UpperCamelCase__ = 0
UpperCamelCase__ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(snake_case_ , 1E-5 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
# make mask reproducible
np.random.seed(2 )
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = int((config.image_size // config.patch_size) ** 2 )
UpperCamelCase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = self._prepare_for_class(snake_case_ , snake_case_ )
UpperCamelCase__ = model(snake_case_ , noise=snake_case_ )
UpperCamelCase__ = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(snake_case_ )
UpperCamelCase__ = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
UpperCamelCase__ = model_class.from_config(model.config )
UpperCamelCase__ = new_model(snake_case_ ) # Build model
new_model.set_weights(model.get_weights() )
UpperCamelCase__ = new_model(snake_case_ , noise=snake_case_ )
self.assert_outputs_same(snake_case_ , snake_case_ )
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
pass
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = TFViTMAEModel.from_pretrained('google/vit-base-patch16-224' )
self.assertIsNotNone(snake_case_ )
def prepare_img():
    """simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class __lowerCamelCase ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
UpperCamelCase__ = TFViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=snake_case_ , return_tensors='tf' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCamelCase__ = ViTMAEConfig()
UpperCamelCase__ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCamelCase__ = np.random.uniform(size=(1, num_patches) )
# forward pass
UpperCamelCase__ = model(**snake_case_ , noise=snake_case_ )
# verify the logits
UpperCamelCase__ = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , snake_case_ )
UpperCamelCase__ = tf.convert_to_tensor(
[[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , snake_case_ , atol=1E-4 )
| 20 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
A__ : Dict= logging.get_logger(__name__)
A__ : str= {"""vocab_file""": """spiece.model"""}
A__ : Union[str, Any]= {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
}
}
# TODO(PVP) - this should be removed in Transformers v5
A__ : Union[str, Any]= {
"""t5-small""": 5_12,
"""t5-base""": 5_12,
"""t5-large""": 5_12,
"""t5-3b""": 5_12,
"""t5-11b""": 5_12,
}
A__ : Optional[Any]= """▁"""
class __lowerCamelCase ( _a ):
a : Dict =VOCAB_FILES_NAMES
a : str =PRETRAINED_VOCAB_FILES_MAP
a : Union[str, Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : List[str] =["""input_ids""", """attention_mask"""]
def __init__( self , snake_case_ , snake_case_="</s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_=100 , snake_case_=None , snake_case_ = None , snake_case_=True , **snake_case_ , ) -> None:
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
UpperCamelCase__ = [F'<extra_id_{i}>' for i in range(snake_case_ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
UpperCamelCase__ = len(set(filter(lambda snake_case_ : bool('extra_id' in str(snake_case_ ) ) , snake_case_ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
' tokens' )
if legacy:
logger.warning_once(
F'You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'
' read the related pull request available at https://github.com/huggingface/transformers/pull/24565' )
UpperCamelCase__ = legacy
UpperCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , extra_ids=snake_case_ , additional_special_tokens=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , legacy=snake_case_ , **snake_case_ , )
UpperCamelCase__ = vocab_file
UpperCamelCase__ = extra_ids
UpperCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case_ )
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case_ , snake_case_ , snake_case_ ) -> Optional[Any]:
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
UpperCamelCase__ = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'This tokenizer was incorrectly instantiated with a model max length of'
F' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
F' {pretrained_model_name_or_path} automatically truncating your input to'
F' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
F' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
' instantiate this tokenizer with `model_max_length` set to your preferred value.' , snake_case_ , )
return max_model_length
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
return self.sp_model.get_piece_size() + self._extra_ids
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = {self.convert_ids_to_tokens(snake_case_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None , snake_case_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(snake_case_ )) + [1]
return ([0] * len(snake_case_ )) + [1] + ([0] * len(snake_case_ )) + [1]
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
return list(
set(filter(lambda snake_case_ : bool(re.search(r'<extra_id_\d+>' , snake_case_ ) ) is not None , self.additional_special_tokens ) ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
return [self._convert_token_to_id(snake_case_ ) for token in self.get_sentinel_tokens()]
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> List[int]:
if len(snake_case_ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
' eos tokens being added.' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None ) -> List[int]:
UpperCamelCase__ = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None ) -> List[int]:
UpperCamelCase__ = self._add_eos_if_not_present(snake_case_ )
if token_ids_a is None:
return token_ids_a
else:
UpperCamelCase__ = self._add_eos_if_not_present(snake_case_ )
return token_ids_a + token_ids_a
def __getstate__( self ) -> str:
UpperCamelCase__ = self.__dict__.copy()
UpperCamelCase__ = None
return state
def __setstate__( self , snake_case_ ) -> Any:
UpperCamelCase__ = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
UpperCamelCase__ = {}
UpperCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , **snake_case_ ) -> List[str]:
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
UpperCamelCase__ = SPIECE_UNDERLINE + text.replace(snake_case_ , ' ' )
return super().tokenize(snake_case_ , **snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , **snake_case_ ) -> List[Any]:
if not self.legacy:
UpperCamelCase__ = text.startswith(snake_case_ )
if is_first:
UpperCamelCase__ = text[1:]
UpperCamelCase__ = self.sp_model.encode(snake_case_ , out_type=snake_case_ )
if not self.legacy and not is_first and not text.startswith(' ' ) and tokens[0].startswith(snake_case_ ):
UpperCamelCase__ = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> List[str]:
if token.startswith('<extra_id_' ):
UpperCamelCase__ = re.match(r'<extra_id_(\d+)>' , snake_case_ )
UpperCamelCase__ = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Optional[int]:
if index < self.sp_model.get_piece_size():
UpperCamelCase__ = self.sp_model.IdToPiece(snake_case_ )
else:
UpperCamelCase__ = F'<extra_id_{self.vocab_size - 1 - index}>'
return token
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> List[str]:
UpperCamelCase__ = []
UpperCamelCase__ = ''
UpperCamelCase__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(snake_case_ ) + token
UpperCamelCase__ = True
UpperCamelCase__ = []
else:
current_sub_tokens.append(snake_case_ )
UpperCamelCase__ = False
out_string += self.sp_model.decode(snake_case_ )
return out_string.strip()
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]:
if not os.path.isdir(snake_case_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
UpperCamelCase__ = os.path.join(
snake_case_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case_ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case_ , 'wb' ) as fi:
UpperCamelCase__ = self.sp_model.serialized_model_proto()
fi.write(snake_case_ )
return (out_vocab_file,)
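if __name__ == "__main__":
    # Quick, self-contained check of the sentinel-id arithmetic used in the
    # two convert methods above: `<extra_id_k>` maps to `vocab_size - k - 1`,
    # and the inverse mapping recovers k. The vocab size is an assumption
    # (t5-small: 32000 SentencePiece pieces + 100 extra ids).
    vocab_size = 3_2100
    for k in (0, 1, 99):
        index = vocab_size - k - 1
        assert F'<extra_id_{vocab_size - 1 - index}>' == F'<extra_id_{k}>'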
| 20 | 1 |
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class __lowerCamelCase :
def __init__( self , snake_case_ , snake_case_=sys.maxsize ) -> Any:
UpperCamelCase__ = 'bilinear'
UpperCamelCase__ = max_size
UpperCamelCase__ = short_edge_length
def __call__( self , snake_case_ ) -> int:
UpperCamelCase__ = []
for img in imgs:
UpperCamelCase__ , UpperCamelCase__ = img.shape[:2]
# later: provide list and randomly choose index for resize
UpperCamelCase__ = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
UpperCamelCase__ = size * 1.0 / min(snake_case_ , snake_case_ )
if h < w:
UpperCamelCase__ , UpperCamelCase__ = size, scale * w
else:
UpperCamelCase__ , UpperCamelCase__ = scale * h, size
if max(snake_case_ , snake_case_ ) > self.max_size:
UpperCamelCase__ = self.max_size * 1.0 / max(snake_case_ , snake_case_ )
UpperCamelCase__ = newh * scale
UpperCamelCase__ = neww * scale
UpperCamelCase__ = int(neww + 0.5 )
UpperCamelCase__ = int(newh + 0.5 )
            if img.dtype == np.uint8:
UpperCamelCase__ = Image.fromarray(snake_case_ )
UpperCamelCase__ = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
UpperCamelCase__ = np.asarray(snake_case_ )
else:
                UpperCamelCase__ = img.permute(2 , 0 , 1 ).unsqueeze(0 )  # hwc -> nchw
UpperCamelCase__ = nn.functional.interpolate(
snake_case_ , (newh, neww) , mode=self.interp_method , align_corners=snake_case_ ).squeeze(0 )
img_augs.append(snake_case_ )
return img_augs
class __lowerCamelCase :
def __init__( self , snake_case_ ) -> Any:
UpperCamelCase__ = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
UpperCamelCase__ = cfg.INPUT.FORMAT
UpperCamelCase__ = cfg.SIZE_DIVISIBILITY
UpperCamelCase__ = cfg.PAD_VALUE
UpperCamelCase__ = cfg.INPUT.MAX_SIZE_TEST
UpperCamelCase__ = cfg.MODEL.DEVICE
UpperCamelCase__ = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
UpperCamelCase__ = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
        UpperCamelCase__ = lambda snake_case_ : (snake_case_ - self.pixel_mean) / self.pixel_std
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> List[str]:
        UpperCamelCase__ = tuple(max(s ) for s in zip(*[img.shape for img in images] ) )
UpperCamelCase__ = [im.shape[-2:] for im in images]
UpperCamelCase__ = [
nn.functional.pad(
snake_case_ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(snake_case_ , snake_case_ )
]
return torch.stack(snake_case_ ), torch.tensor(snake_case_ )
def __call__( self , snake_case_ , snake_case_=False ) -> List[str]:
with torch.no_grad():
if not isinstance(snake_case_ , snake_case_ ):
UpperCamelCase__ = [images]
if single_image:
assert len(snake_case_ ) == 1
for i in range(len(snake_case_ ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(snake_case_ , images.pop(snake_case_ ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
snake_case_ , torch.as_tensor(img_tensorize(images.pop(snake_case_ ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
UpperCamelCase__ = torch.tensor([im.shape[:2] for im in images] )
UpperCamelCase__ = self.aug(snake_case_ )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
UpperCamelCase__ = [self.normalizer(snake_case_ ) for x in images]
# now pad them to do the following operations
UpperCamelCase__ , UpperCamelCase__ = self.pad(snake_case_ )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
UpperCamelCase__ = torch.true_divide(snake_case_ , snake_case_ )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
assert torch.isfinite(SCREAMING_SNAKE_CASE ).all(), "Box tensor contains infinite or NaN!"
UpperCamelCase__ , UpperCamelCase__ = box_size
tensor[:, 0].clamp_(min=0 , max=SCREAMING_SNAKE_CASE )
tensor[:, 1].clamp_(min=0 , max=SCREAMING_SNAKE_CASE )
tensor[:, 2].clamp_(min=0 , max=SCREAMING_SNAKE_CASE )
tensor[:, 3].clamp_(min=0 , max=SCREAMING_SNAKE_CASE )
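if __name__ == "__main__":
    # Self-contained sketch of what the two helpers above do, with assumed
    # values: scale box coordinates by per-axis (y, x) factors, then clamp
    # them in place to an assumed 480x640 image. `torch` is already imported
    # at the top of this module.
    boxes = torch.tensor([[-10.0, 20.0, 900.0, 400.0]] )
    scale_yx = torch.tensor([[1.0, 1.0]] )
    boxes[:, 0::2] *= scale_yx[:, 1]   # x1, x2 scale with the width factor
    boxes[:, 1::2] *= scale_yx[:, 0]   # y1, y2 scale with the height factor
    for coord, bound in ((0, 640), (1, 480), (2, 640), (3, 480)):
        boxes[:, coord].clamp_(min=0 , max=bound )
    print(boxes )   # tensor([[  0.,  20., 640., 400.]])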
| 20 |
"""simple docstring"""
from __future__ import annotations
from typing import Generic, TypeVar
A__ : Any= TypeVar("""T""")
class __lowerCamelCase ( Generic[T] ):
def __init__( self , snake_case_ ) -> None:
UpperCamelCase__ = data
UpperCamelCase__ = self
UpperCamelCase__ = 0
class __lowerCamelCase ( Generic[T] ):
def __init__( self ) -> None:
# map from node name to the node object
UpperCamelCase__ = {}
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> None:
# create a new set with x as its member
UpperCamelCase__ = DisjointSetTreeNode(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> DisjointSetTreeNode[T]:
# find the set x belongs to (with path-compression)
UpperCamelCase__ = self.map[data]
if elem_ref != elem_ref.parent:
UpperCamelCase__ = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> None:
# helper function for union operation
if nodea.rank > nodea.rank:
UpperCamelCase__ = nodea
else:
UpperCamelCase__ = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> None:
# merge 2 disjoint sets
self.link(self.find_set(snake_case_ ) , self.find_set(snake_case_ ) )
class __lowerCamelCase ( Generic[T] ):
def __init__( self ) -> None:
# connections: map from the node to the neighbouring nodes (with weights)
UpperCamelCase__ = {}
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> None:
# add a node ONLY if its not present in the graph
if node not in self.connections:
UpperCamelCase__ = {}
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> None:
# add an edge with the given weight
self.add_node(snake_case_ )
self.add_node(snake_case_ )
UpperCamelCase__ = weight
UpperCamelCase__ = weight
def SCREAMING_SNAKE_CASE__ ( self ) -> GraphUndirectedWeighted[T]:
UpperCamelCase__ = []
UpperCamelCase__ = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
        edges.sort(key=lambda snake_case_ : snake_case_[2] )
# creating the disjoint set
UpperCamelCase__ = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(snake_case_ )
# MST generation
UpperCamelCase__ = 0
UpperCamelCase__ = 0
UpperCamelCase__ = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = edges[index]
index += 1
UpperCamelCase__ = disjoint_set.find_set(snake_case_ )
UpperCamelCase__ = disjoint_set.find_set(snake_case_ )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(snake_case_ , snake_case_ , snake_case_ )
disjoint_set.union(snake_case_ , snake_case_ )
return graph
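if __name__ == "__main__":
    # Self-contained sketch of the Kruskal flow implemented above, run on a
    # small assumed graph with a plain dict-based union-find (path halving,
    # no ranks) so the demo stays independent of the classes.
    edge_list = [('a', 'b', 1), ('b', 'c', 2), ('a', 'c', 3), ('c', 'd', 4)]
    parent = {v: v for e in edge_list for v in e[:2]}

    def find(v ):
        while parent[v] != v:
            parent[v] = parent[parent[v]]
            v = parent[v]
        return v

    mst = []
    for u, v, w in sorted(edge_list , key=lambda e : e[2] ):
        root_u, root_v = find(u ), find(v )
        if root_u != root_v:
            parent[root_u] = root_v
            mst.append((u, v, w) )
    print(mst )   # [('a', 'b', 1), ('b', 'c', 2), ('c', 'd', 4)]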
| 20 | 1 |
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ : str= logging.get_logger(__name__)
A__ : List[Any]= {
"""nvidia/segformer-b0-finetuned-ade-512-512""": (
"""https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"""
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class __lowerCamelCase ( _a ):
a : Any ="""segformer"""
def __init__( self , snake_case_=3 , snake_case_=4 , snake_case_=[2, 2, 2, 2] , snake_case_=[8, 4, 2, 1] , snake_case_=[32, 64, 160, 256] , snake_case_=[7, 3, 3, 3] , snake_case_=[4, 2, 2, 2] , snake_case_=[1, 2, 5, 8] , snake_case_=[4, 4, 4, 4] , snake_case_="gelu" , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.1 , snake_case_=0.02 , snake_case_=0.1 , snake_case_=1E-6 , snake_case_=256 , snake_case_=255 , **snake_case_ , ) -> Tuple:
super().__init__(**snake_case_ )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
' removed, as the behaviour will default to that of reshape_last_stage = True.' , snake_case_ , )
UpperCamelCase__ = num_channels
UpperCamelCase__ = num_encoder_blocks
UpperCamelCase__ = depths
UpperCamelCase__ = sr_ratios
UpperCamelCase__ = hidden_sizes
UpperCamelCase__ = patch_sizes
UpperCamelCase__ = strides
UpperCamelCase__ = mlp_ratios
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = classifier_dropout_prob
UpperCamelCase__ = initializer_range
UpperCamelCase__ = drop_path_rate
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = decoder_hidden_size
UpperCamelCase__ = kwargs.get('reshape_last_stage' , snake_case_ )
UpperCamelCase__ = semantic_loss_ignore_index
class __lowerCamelCase ( _a ):
a : Any =version.parse("""1.11""" )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> float:
return 1E-4
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
return 12
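# The ONNX export config above exposes a single dynamic-shaped `pixel_values`
# input, validates exported outputs to an absolute tolerance of 1e-4, and
# uses 12 as the default opset for export.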
| 20 |
"""simple docstring"""
A__ : Tuple= """Alexander Joslin"""
import operator as op
from .stack import Stack
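# Dijkstra's two-stack algorithm evaluates a fully parenthesised infix
# expression with single-digit operands using two stacks:
#   RULE 1: push operands on the operand stack
#   RULE 2: push operators on the operator stack
#   RULE 3: ignore opening parentheses (they fall through below)
#   RULE 4: on ")", pop one operator and two operands, push the result
#   RULE 5: the final value is the only item left on the operand stack
# Worked trace for "(2 + (3 * 4))": 3 * 4 = 12 is computed at the inner ")",
# then 2 + 12 = 14 at the outer one.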
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
UpperCamelCase__ = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
UpperCamelCase__ = Stack()
UpperCamelCase__ = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(SCREAMING_SNAKE_CASE ) )
elif i in operators:
# RULE 2
operator_stack.push(SCREAMING_SNAKE_CASE )
elif i == ")":
# RULE 4
UpperCamelCase__ = operator_stack.peek()
operator_stack.pop()
UpperCamelCase__ = operand_stack.peek()
operand_stack.pop()
UpperCamelCase__ = operand_stack.peek()
operand_stack.pop()
UpperCamelCase__ = operators[opr](SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
operand_stack.push(SCREAMING_SNAKE_CASE )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
A__ : int= """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 20 | 1 |
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
A__ : Dict= """%20""".join(argv[1:]) if len(argv) > 1 else quote(str(input("""Search: """)))
print("""Googling.....""")
A__ : str= F"""https://www.google.com/search?q={query}&num=100"""
A__ : Optional[Any]= requests.get(
url,
headers={"""User-Agent""": str(UserAgent().random)},
)
try:
A__ : Tuple= (
BeautifulSoup(res.text, """html.parser""")
.find("""div""", attrs={"""class""": """yuRUbf"""})
.find("""a""")
.get("""href""")
)
except AttributeError:
A__ : Optional[Any]= parse_qs(
BeautifulSoup(res.text, """html.parser""")
.find("""div""", attrs={"""class""": """kCrYT"""})
.find("""a""")
.get("""href""")
)["""url"""][0]
webbrowser.open(link)
| 20 |
"""simple docstring"""
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
A__ : Any= """src/diffusers"""
# Matches is_xxx_available()
A__ : Tuple= re.compile(r"""is\_([a-z_]*)_available\(\)""")
# Matches from xxx import bla
A__ : Any= re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
A__ : Optional[Any]= """
{0} = None
"""
A__ : List[Any]= """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
"""
A__ : Dict= """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ = _re_backend.findall(SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) == 0:
return None
return "_and_".join(SCREAMING_SNAKE_CASE )
def lowerCAmelCase_( ) -> str:
"""simple docstring"""
with open(os.path.join(SCREAMING_SNAKE_CASE , '__init__.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
UpperCamelCase__ = f.readlines()
# Get to the point we do the actual imports for type checking
UpperCamelCase__ = 0
UpperCamelCase__ = {}
# Go through the end of the file
while line_index < len(SCREAMING_SNAKE_CASE ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
UpperCamelCase__ = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith('else:' ):
line_index += 1
line_index += 1
UpperCamelCase__ = []
# Until we unindent, add backend objects to the list
while line_index < len(SCREAMING_SNAKE_CASE ) and len(lines[line_index] ) > 1:
UpperCamelCase__ = lines[line_index]
UpperCamelCase__ = _re_single_line_import.search(SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(SCREAMING_SNAKE_CASE ) > 0:
UpperCamelCase__ = objects
else:
line_index += 1
return backend_specific_objects
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
if name.isupper():
return DUMMY_CONSTANT.format(SCREAMING_SNAKE_CASE )
elif name.islower():
return DUMMY_FUNCTION.format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
return DUMMY_CLASS.format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE=None ) -> int:
"""simple docstring"""
if backend_specific_objects is None:
UpperCamelCase__ = read_init()
# For special correspondence backend to module name as used in the function requires_modulename
UpperCamelCase__ = {}
for backend, objects in backend_specific_objects.items():
UpperCamelCase__ = '[' + ', '.join(F'"{b}"' for b in backend.split('_and_' ) ) + ']'
UpperCamelCase__ = '# This file is autogenerated by the command `make fix-copies`, do not edit.\n'
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for o in objects] )
UpperCamelCase__ = dummy_file
return dummy_files
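# For illustration, with objects ["SomeModel"] under the "torch" backend the
# class template above renders (the name and backend here are hypothetical):
#
#   class SomeModel(metaclass=DummyObject):
#       _backends = ["torch"]
#
#       def __init__(self, *args, **kwargs):
#           requires_backends(self, ["torch"])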
def lowerCAmelCase_( SCREAMING_SNAKE_CASE=False ) -> int:
"""simple docstring"""
UpperCamelCase__ = create_dummy_files()
# For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
UpperCamelCase__ = {'torch': 'pt'}
# Locate actual dummy modules and read their content.
UpperCamelCase__ = os.path.join(SCREAMING_SNAKE_CASE , 'utils' )
UpperCamelCase__ = {
backend: os.path.join(SCREAMING_SNAKE_CASE , F'dummy_{short_names.get(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}_objects.py' )
for backend in dummy_files.keys()
}
UpperCamelCase__ = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(SCREAMING_SNAKE_CASE ):
with open(SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' , newline='\n' ) as f:
UpperCamelCase__ = f.read()
else:
UpperCamelCase__ = ''
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F'Updating diffusers.utils.dummy_{short_names.get(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}_objects.py as the main '
'__init__ has new objects.' )
with open(dummy_file_paths[backend] , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
'The main __init__ has objects that are not present in '
F'diffusers.utils.dummy_{short_names.get(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}_objects.py. Run `make fix-copies` '
'to fix this.' )
if __name__ == "__main__":
A__ : Any= argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
A__ : Optional[int]= parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 20 | 1 |
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = 'ylacombe/bark-small'
UpperCamelCase__ = tempfile.mkdtemp()
UpperCamelCase__ = 'en_speaker_1'
UpperCamelCase__ = 'This is a test string'
UpperCamelCase__ = 'speaker_embeddings_path.json'
UpperCamelCase__ = 'speaker_embeddings'
def SCREAMING_SNAKE_CASE__ ( self , **snake_case_ ) -> Optional[Any]:
return AutoTokenizer.from_pretrained(self.checkpoint , **snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = BarkProcessor(tokenizer=snake_case_ )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase__ = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
UpperCamelCase__ = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
UpperCamelCase__ = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
UpperCamelCase__ = 35
UpperCamelCase__ = 2
UpperCamelCase__ = 8
UpperCamelCase__ = {
'semantic_prompt': np.ones(snake_case_ ),
'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len) ),
'fine_prompt': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
UpperCamelCase__ = processor(text=self.input_string , voice_preset=snake_case_ )
UpperCamelCase__ = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(snake_case_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
UpperCamelCase__ = os.path.join(self.tmpdirname , 'file.npz' )
np.savez(snake_case_ , **snake_case_ )
UpperCamelCase__ = processor(text=self.input_string , voice_preset=snake_case_ )
UpperCamelCase__ = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(snake_case_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
UpperCamelCase__ = processor(text=self.input_string , voice_preset=self.voice_preset )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = BarkProcessor(tokenizer=snake_case_ )
UpperCamelCase__ = processor(text=self.input_string )
UpperCamelCase__ = tokenizer(
self.input_string , padding='max_length' , max_length=256 , add_special_tokens=snake_case_ , return_attention_mask=snake_case_ , return_token_type_ids=snake_case_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 20 |
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
A__ : Optional[Any]= """Run commands across TPU VMs for initial setup before running `accelerate launch`."""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE=None ) -> Dict:
"""simple docstring"""
if subparsers is not None:
UpperCamelCase__ = subparsers.add_parser('tpu-config' , description=_description )
else:
UpperCamelCase__ = argparse.ArgumentParser('Accelerate tpu-config command' , description=_description )
# Core arguments
UpperCamelCase__ = parser.add_argument_group(
'Config Arguments' , 'Arguments that can be configured through `accelerate config`.' )
config_args.add_argument(
'--config_file' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='Path to the config file to use for accelerate.' , )
config_args.add_argument(
'--tpu_name' , default=SCREAMING_SNAKE_CASE , help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.' , )
config_args.add_argument(
'--tpu_zone' , default=SCREAMING_SNAKE_CASE , help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.' , )
UpperCamelCase__ = parser.add_argument_group('TPU Arguments' , 'Arguments for options ran inside the TPU.' )
pod_args.add_argument(
'--use_alpha' , action='store_true' , help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.' , )
pod_args.add_argument(
'--command_file' , default=SCREAMING_SNAKE_CASE , help='The path to the file containing the commands to run on the pod on startup.' , )
pod_args.add_argument(
'--command' , action='append' , nargs='+' , help='A command to run on the pod. Can be passed multiple times.' , )
pod_args.add_argument(
'--install_accelerate' , action='store_true' , help='Whether to install accelerate on the pod. Defaults to False.' , )
pod_args.add_argument(
'--accelerate_version' , default='latest' , help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.' , )
pod_args.add_argument(
'--debug' , action='store_true' , help='If set, will print the command that would be run instead of running it.' )
if subparsers is not None:
parser.set_defaults(func=SCREAMING_SNAKE_CASE )
return parser
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
UpperCamelCase__ = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
UpperCamelCase__ = defaults.command_file
if not args.command and defaults.commands is not None:
UpperCamelCase__ = defaults.commands
if not args.tpu_name:
UpperCamelCase__ = defaults.tpu_name
if not args.tpu_zone:
UpperCamelCase__ = defaults.tpu_zone
if args.accelerate_version == "dev":
UpperCamelCase__ = 'git+https://github.com/huggingface/accelerate.git'
elif args.accelerate_version == "latest":
UpperCamelCase__ = 'accelerate -U'
elif isinstance(parse(args.accelerate_version ) , SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = F'accelerate=={args.accelerate_version}'
if not args.command_file and not args.command:
raise ValueError('You must specify either a command file or a command to run on the pod.' )
if args.command_file:
with open(args.command_file , 'r' ) as f:
UpperCamelCase__ = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
UpperCamelCase__ = ['cd /usr/share']
if args.install_accelerate:
new_cmd += [F'pip install {args.accelerate_version}']
new_cmd += args.command
UpperCamelCase__ = '; '.join(SCREAMING_SNAKE_CASE )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
UpperCamelCase__ = ['gcloud']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(F'Running {" ".join(SCREAMING_SNAKE_CASE )}' )
return
subprocess.run(SCREAMING_SNAKE_CASE )
print('Successfully setup pod.' )
def lowerCAmelCase_( ) -> int:
"""simple docstring"""
UpperCamelCase__ = tpu_command_parser()
UpperCamelCase__ = parser.parse_args()
tpu_command_launcher(SCREAMING_SNAKE_CASE )
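# Example invocation (a sketch; flag names follow the parser defined above,
# and the TPU name and zone are placeholders):
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "pip install -U accelerate" --command "python train.py" --debug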
| 20 | 1 |
"""simple docstring"""
class __lowerCamelCase :
def __init__( self ) -> Any:
UpperCamelCase__ = 0
UpperCamelCase__ = 0
UpperCamelCase__ = {}
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Union[str, Any]:
if vertex not in self.adjacency:
UpperCamelCase__ = {}
self.num_vertices += 1
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> Optional[int]:
self.add_vertex(snake_case_ )
self.add_vertex(snake_case_ )
if head == tail:
return
UpperCamelCase__ = weight
UpperCamelCase__ = weight
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = self.get_edges()
for edge in edges:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = edge
edges.remove((tail, head, weight) )
for i in range(len(snake_case_ ) ):
UpperCamelCase__ = list(edges[i] )
        edges.sort(key=lambda snake_case_ : snake_case_[2] )
for i in range(len(snake_case_ ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
UpperCamelCase__ = edges[i][2] + 1
for edge in edges:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = edge
UpperCamelCase__ = weight
UpperCamelCase__ = weight
def __str__( self ) -> Tuple:
UpperCamelCase__ = ''
for tail in self.adjacency:
for head in self.adjacency[tail]:
UpperCamelCase__ = self.adjacency[head][tail]
string += F'{head} -> {tail} == {weight}\n'
return string.rstrip('\n' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
return self.adjacency.keys()
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case_=None , snake_case_=None ) -> Union[str, Any]:
UpperCamelCase__ = Graph()
if vertices is None:
UpperCamelCase__ = []
if edges is None:
UpperCamelCase__ = []
for vertex in vertices:
g.add_vertex(snake_case_ )
for edge in edges:
g.add_edge(*snake_case_ )
return g
class __lowerCamelCase :
def __init__( self ) -> str:
UpperCamelCase__ = {}
UpperCamelCase__ = {}
def __len__( self ) -> int:
return len(self.parent )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> int:
if item in self.parent:
return self.find(snake_case_ )
UpperCamelCase__ = item
UpperCamelCase__ = 0
return item
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Optional[Any]:
if item not in self.parent:
return self.make_set(snake_case_ )
if item != self.parent[item]:
UpperCamelCase__ = self.find(self.parent[item] )
return self.parent[item]
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> List[Any]:
UpperCamelCase__ = self.find(snake_case_ )
UpperCamelCase__ = self.find(snake_case_ )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
UpperCamelCase__ = roota
return roota
if self.rank[roota] < self.rank[roota]:
UpperCamelCase__ = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
UpperCamelCase__ = roota
return roota
return None
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> int:
UpperCamelCase__ = graph.num_vertices
UpperCamelCase__ = Graph.UnionFind()
UpperCamelCase__ = []
while num_components > 1:
UpperCamelCase__ = {}
for vertex in graph.get_vertices():
UpperCamelCase__ = -1
UpperCamelCase__ = graph.get_edges()
for edge in edges:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = edge
edges.remove((tail, head, weight) )
for edge in edges:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = edge
UpperCamelCase__ = union_find.find(snake_case_ )
UpperCamelCase__ = union_find.find(snake_case_ )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
UpperCamelCase__ = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
UpperCamelCase__ = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = cheap_edge[vertex]
if union_find.find(snake_case_ ) != union_find.find(snake_case_ ):
union_find.union(snake_case_ , snake_case_ )
mst_edges.append(cheap_edge[vertex] )
UpperCamelCase__ = num_components - 1
UpperCamelCase__ = Graph.build(edges=snake_case_ )
return mst
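# The static solver above is Borůvka's algorithm: while more than one
# component remains, every component records its cheapest outgoing edge in
# `cheap_edge`, and all recorded edges are merged into the forest through the
# union-find. The weight-adjustment pass in the Graph class first makes all
# edge weights distinct, which Borůvka's algorithm needs to break ties
# deterministically.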
| 20 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : List[str]= logging.get_logger(__name__)
class __lowerCamelCase ( _a ):
a : Optional[int] ="""timm_backbone"""
def __init__( self , snake_case_=None , snake_case_=3 , snake_case_=True , snake_case_=True , snake_case_=None , **snake_case_ , ) -> Dict:
super().__init__(**snake_case_ )
UpperCamelCase__ = backbone
UpperCamelCase__ = num_channels
UpperCamelCase__ = features_only
UpperCamelCase__ = use_pretrained_backbone
UpperCamelCase__ = True
UpperCamelCase__ = out_indices if out_indices is not None else (-1,)
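# The fields stored above mirror TimmBackboneConfig: `backbone` is a timm
# model name (e.g. "resnet18"), `features_only` requests intermediate feature
# maps, and `out_indices` selects which stages to return (default: the last
# stage only, encoded as (-1,)).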
| 20 | 1 |
"""simple docstring"""
from __future__ import annotations
A__ : Dict= tuple[int, int, int]
A__ : List[str]= tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
A__ : Union[str, Any]= """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
# -------------------------- default selection --------------------------
# rotors --------------------------
A__ : List[Any]= """EGZWVONAHDCLFQMSIPJBYUKXTR"""
A__ : List[Any]= """FOBHMDKEXQNRAULPGSJVTYICZW"""
A__ : Union[str, Any]= """ZJXESIUQLHAVRMDOYGTNFWPBKC"""
# reflector --------------------------
A__ : Optional[Any]= {
"""A""": """N""",
"""N""": """A""",
"""B""": """O""",
"""O""": """B""",
"""C""": """P""",
"""P""": """C""",
"""D""": """Q""",
"""Q""": """D""",
"""E""": """R""",
"""R""": """E""",
"""F""": """S""",
"""S""": """F""",
"""G""": """T""",
"""T""": """G""",
"""H""": """U""",
"""U""": """H""",
"""I""": """V""",
"""V""": """I""",
"""J""": """W""",
"""W""": """J""",
"""K""": """X""",
"""X""": """K""",
"""L""": """Y""",
"""Y""": """L""",
"""M""": """Z""",
"""Z""": """M""",
}
# -------------------------- extra rotors --------------------------
A__ : Any= """RMDJXFUWGISLHVTCQNKYPBEZOA"""
A__ : Optional[int]= """SGLCPQWZHKXAREONTFBVIYJUDM"""
A__ : Tuple= """HVSICLTYKQUBXDWAJZOMFGPREN"""
A__ : Dict= """RZWQHFMVDBKICJLNTUXAGYPSOE"""
A__ : int= """LFKIJODBEGAMQPXVUHYSTCZRWN"""
A__ : str= """KOAEGVDHXPQZMLFTYWJNBRCIUS"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
"""simple docstring"""
if (unique_rotsel := len(set(SCREAMING_SNAKE_CASE ) )) < 3:
UpperCamelCase__ = F'Please use 3 unique rotors (not {unique_rotsel})'
raise Exception(SCREAMING_SNAKE_CASE )
# Checks if rotor positions are valid
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = rotpos
if not 0 < rotorposa <= len(SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = F'First rotor position is not within range of 1..26 ({rotorposa}'
raise ValueError(SCREAMING_SNAKE_CASE )
if not 0 < rotorposa <= len(SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = F'Second rotor position is not within range of 1..26 ({rotorposa})'
raise ValueError(SCREAMING_SNAKE_CASE )
if not 0 < rotorposa <= len(SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = F'Third rotor position is not within range of 1..26 ({rotorposa})'
raise ValueError(SCREAMING_SNAKE_CASE )
# Validates string and returns dict
UpperCamelCase__ = _plugboard(SCREAMING_SNAKE_CASE )
return rotpos, rotsel, pbdict
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> dict[str, str]:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = F'Plugboard setting isn\'t type string ({type(SCREAMING_SNAKE_CASE )})'
raise TypeError(SCREAMING_SNAKE_CASE )
elif len(SCREAMING_SNAKE_CASE ) % 2 != 0:
UpperCamelCase__ = F'Odd number of symbols ({len(SCREAMING_SNAKE_CASE )})'
raise Exception(SCREAMING_SNAKE_CASE )
elif pbstring == "":
return {}
    UpperCamelCase__ = pbstring.replace(' ' , '' )  # str.replace returns a new string; keep it
# Checks if all characters are unique
UpperCamelCase__ = set()
for i in pbstring:
if i not in abc:
UpperCamelCase__ = F'\'{i}\' not in list of symbols'
raise Exception(SCREAMING_SNAKE_CASE )
elif i in tmppbl:
UpperCamelCase__ = F'Duplicate symbol ({i})'
raise Exception(SCREAMING_SNAKE_CASE )
else:
tmppbl.add(SCREAMING_SNAKE_CASE )
del tmppbl
# Created the dictionary
UpperCamelCase__ = {}
for j in range(0 , len(SCREAMING_SNAKE_CASE ) - 1 , 2 ):
UpperCamelCase__ = pbstring[j + 1]
UpperCamelCase__ = pbstring[j]
return pb
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = (rotora, rotora, rotora) , SCREAMING_SNAKE_CASE = "" , ) -> str:
"""simple docstring"""
UpperCamelCase__ = text.upper()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = _validator(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , plugb.upper() )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = rotor_position
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = rotor_selection
rotorposa -= 1
rotorposa -= 1
rotorposa -= 1
UpperCamelCase__ = []
# encryption/decryption process --------------------------
for symbol in text:
if symbol in abc:
# 1st plugboard --------------------------
if symbol in plugboard:
UpperCamelCase__ = plugboard[symbol]
# rotor ra --------------------------
UpperCamelCase__ = abc.index(SCREAMING_SNAKE_CASE ) + rotorposa
UpperCamelCase__ = rotora[index % len(SCREAMING_SNAKE_CASE )]
# rotor rb --------------------------
UpperCamelCase__ = abc.index(SCREAMING_SNAKE_CASE ) + rotorposa
UpperCamelCase__ = rotora[index % len(SCREAMING_SNAKE_CASE )]
# rotor rc --------------------------
UpperCamelCase__ = abc.index(SCREAMING_SNAKE_CASE ) + rotorposa
UpperCamelCase__ = rotora[index % len(SCREAMING_SNAKE_CASE )]
# reflector --------------------------
# this is the reason you don't need another machine to decipher
UpperCamelCase__ = reflector[symbol]
# 2nd rotors
UpperCamelCase__ = abc[rotora.index(SCREAMING_SNAKE_CASE ) - rotorposa]
UpperCamelCase__ = abc[rotora.index(SCREAMING_SNAKE_CASE ) - rotorposa]
UpperCamelCase__ = abc[rotora.index(SCREAMING_SNAKE_CASE ) - rotorposa]
# 2nd plugboard
if symbol in plugboard:
UpperCamelCase__ = plugboard[symbol]
# moves/resets rotor positions
rotorposa += 1
if rotorposa >= len(SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = 0
rotorposa += 1
if rotorposa >= len(SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = 0
rotorposa += 1
if rotorposa >= len(SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = 0
# else:
# pass
# Error could be also raised
# raise ValueError(
# 'Invalid symbol('+repr(symbol)+')')
result.append(SCREAMING_SNAKE_CASE )
return "".join(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
A__ : Tuple= """This is my Python script that emulates the Enigma machine from WWII."""
A__ : Optional[int]= (1, 1, 1)
A__ : List[str]= """pictures"""
A__ : Optional[int]= (rotora, rotora, rotora)
A__ : Optional[Any]= enigma(message, rotor_pos, rotor_sel, pb)
print("""Encrypted message:""", en)
print("""Decrypted message:""", enigma(en, rotor_pos, rotor_sel, pb))
| 20 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
A__ : Any= logging.get_logger(__name__)
A__ : str= {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class __lowerCamelCase ( _a ):
a : List[str] ="""layoutlmv3"""
def __init__( self , snake_case_=5_0265 , snake_case_=768 , snake_case_=12 , snake_case_=12 , snake_case_=3072 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=2 , snake_case_=0.02 , snake_case_=1E-5 , snake_case_=1 , snake_case_=0 , snake_case_=2 , snake_case_=1024 , snake_case_=128 , snake_case_=128 , snake_case_=True , snake_case_=32 , snake_case_=128 , snake_case_=64 , snake_case_=256 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=224 , snake_case_=3 , snake_case_=16 , snake_case_=None , **snake_case_ , ) -> Union[str, Any]:
super().__init__(
vocab_size=snake_case_ , hidden_size=snake_case_ , num_hidden_layers=snake_case_ , num_attention_heads=snake_case_ , intermediate_size=snake_case_ , hidden_act=snake_case_ , hidden_dropout_prob=snake_case_ , attention_probs_dropout_prob=snake_case_ , max_position_embeddings=snake_case_ , type_vocab_size=snake_case_ , initializer_range=snake_case_ , layer_norm_eps=snake_case_ , pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_ , )
UpperCamelCase__ = max_ad_position_embeddings
UpperCamelCase__ = coordinate_size
UpperCamelCase__ = shape_size
UpperCamelCase__ = has_relative_attention_bias
UpperCamelCase__ = rel_pos_bins
UpperCamelCase__ = max_rel_pos
UpperCamelCase__ = has_spatial_attention_bias
UpperCamelCase__ = rel_ad_pos_bins
UpperCamelCase__ = max_rel_ad_pos
UpperCamelCase__ = text_embed
UpperCamelCase__ = visual_embed
UpperCamelCase__ = input_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = patch_size
UpperCamelCase__ = classifier_dropout
class __lowerCamelCase ( _a ):
a : Tuple =version.parse("""1.12""" )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Mapping[str, Mapping[int, str]]:
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
else:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels'}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> float:
return 1E-5
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
return 12
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = -1 , snake_case_ = -1 , snake_case_ = False , snake_case_ = None , snake_case_ = 3 , snake_case_ = 40 , snake_case_ = 40 , ) -> Mapping[str, Any]:
setattr(processor.image_processor , 'apply_ocr' , snake_case_ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCamelCase__ = compute_effective_axis_dimension(
snake_case_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCamelCase__ = processor.tokenizer.num_special_tokens_to_add(snake_case_ )
UpperCamelCase__ = compute_effective_axis_dimension(
snake_case_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=snake_case_ )
# Generate dummy inputs according to compute batch and sequence
UpperCamelCase__ = [[' '.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
UpperCamelCase__ = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
UpperCamelCase__ = self._generate_dummy_images(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
UpperCamelCase__ = dict(
processor(
snake_case_ , text=snake_case_ , boxes=snake_case_ , return_tensors=snake_case_ , ) )
return inputs
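# generate_dummy_inputs above toggles `apply_ocr` on the image processor,
# fixes the batch to 2 samples and the token sequence to 8 tokens (plus
# special tokens) when dynamic axes are requested, and feeds one unk-token
# string, a single dummy bounding box, and blank generated images per sample.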
| 20 | 1 |
"""simple docstring"""
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
A__ : Tuple= get_tests_dir("""fixtures/test_sentencepiece_bpe.model""")
class __lowerCamelCase ( _a , unittest.TestCase ):
a : str =BartphoTokenizer
a : Union[str, Any] =False
a : str =True
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
super().setUp()
UpperCamelCase__ = ['▁This', '▁is', '▁a', '▁t', 'est']
UpperCamelCase__ = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
UpperCamelCase__ = {'unk_token': '<unk>'}
UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['monolingual_vocab_file'] )
with open(self.monolingual_vocab_file , 'w' , encoding='utf-8' ) as fp:
for token in vocab_tokens:
fp.write(F'{token} {vocab_tokens[token]}\n' )
UpperCamelCase__ = BartphoTokenizer(snake_case_ , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self , **snake_case_ ) -> int:
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Dict:
UpperCamelCase__ = 'This is a là test'
UpperCamelCase__ = 'This is a<unk><unk> test'
return input_text, output_text
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
UpperCamelCase__ = BartphoTokenizer(snake_case_ , self.monolingual_vocab_file , **self.special_tokens_map )
UpperCamelCase__ = 'This is a là test'
UpperCamelCase__ = '▁This ▁is ▁a ▁l à ▁t est'.split()
UpperCamelCase__ = tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase__ = tokens + [tokenizer.unk_token]
UpperCamelCase__ = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case_ ) , snake_case_ )
| 20 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class __lowerCamelCase :
def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=2 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=3 , snake_case_=4 , snake_case_=None , ) -> Tuple:
UpperCamelCase__ = parent
UpperCamelCase__ = 13
UpperCamelCase__ = 7
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = 99
UpperCamelCase__ = 384
UpperCamelCase__ = 2
UpperCamelCase__ = 4
UpperCamelCase__ = 37
UpperCamelCase__ = 'gelu'
UpperCamelCase__ = 0.1
UpperCamelCase__ = 0.1
UpperCamelCase__ = 512
UpperCamelCase__ = 16
UpperCamelCase__ = 2
UpperCamelCase__ = 0.02
UpperCamelCase__ = 3
UpperCamelCase__ = 4
UpperCamelCase__ = 128
UpperCamelCase__ = 2
UpperCamelCase__ = 9
UpperCamelCase__ = 1
UpperCamelCase__ = None
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ = None
if self.use_input_mask:
UpperCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ = None
if self.use_token_type_ids:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase__ = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=snake_case_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int:
UpperCamelCase__ = TFConvBertModel(config=snake_case_ )
UpperCamelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCamelCase__ = [input_ids, input_mask]
UpperCamelCase__ = model(snake_case_ )
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int:
UpperCamelCase__ = TFConvBertForMaskedLM(config=snake_case_ )
UpperCamelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[str]:
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = TFConvBertForSequenceClassification(config=snake_case_ )
UpperCamelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Tuple:
UpperCamelCase__ = self.num_choices
UpperCamelCase__ = TFConvBertForMultipleChoice(config=snake_case_ )
UpperCamelCase__ = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[str]:
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = TFConvBertForTokenClassification(config=snake_case_ )
UpperCamelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[Any]:
UpperCamelCase__ = TFConvBertForQuestionAnswering(config=snake_case_ )
UpperCamelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
UpperCamelCase__ = self.prepare_config_and_inputs()
        (
            UpperCamelCase__ ,
            UpperCamelCase__ ,
            UpperCamelCase__ ,
            UpperCamelCase__ ,
            UpperCamelCase__ ,
            UpperCamelCase__ ,
            UpperCamelCase__ ,
        ) = config_and_inputs
UpperCamelCase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class __lowerCamelCase ( _a , _a , unittest.TestCase ):
a : Any =(
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
a : str =(
{
"""feature-extraction""": TFConvBertModel,
"""fill-mask""": TFConvBertForMaskedLM,
"""question-answering""": TFConvBertForQuestionAnswering,
"""text-classification""": TFConvBertForSequenceClassification,
"""token-classification""": TFConvBertForTokenClassification,
"""zero-shot""": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
a : Any =False
a : Dict =False
a : str =False
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = TFConvBertModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=snake_case_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = True
UpperCamelCase__ = True
if hasattr(snake_case_ , 'use_cache' ):
UpperCamelCase__ = True
UpperCamelCase__ = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
UpperCamelCase__ = getattr(self.model_tester , 'key_length' , snake_case_ )
for model_class in self.all_model_classes:
UpperCamelCase__ = self._prepare_for_class(snake_case_ , snake_case_ )
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = len(model(snake_case_ ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case_ , saved_model=snake_case_ )
UpperCamelCase__ = os.path.join(snake_case_ , 'saved_model' , '1' )
UpperCamelCase__ = tf.keras.models.load_model(snake_case_ )
UpperCamelCase__ = model(snake_case_ )
if self.is_encoder_decoder:
UpperCamelCase__ = outputs['encoder_hidden_states']
UpperCamelCase__ = outputs['encoder_attentions']
else:
UpperCamelCase__ = outputs['hidden_states']
UpperCamelCase__ = outputs['attentions']
self.assertEqual(len(snake_case_ ) , snake_case_ )
UpperCamelCase__ = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(snake_case_ ) , snake_case_ )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = True
UpperCamelCase__ = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
UpperCamelCase__ = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
UpperCamelCase__ = getattr(self.model_tester , 'key_length' , snake_case_ )
UpperCamelCase__ = getattr(self.model_tester , 'key_length' , snake_case_ )
def check_decoder_attentions_output(snake_case_ ):
UpperCamelCase__ = len(snake_case_ )
self.assertEqual(out_len % 2 , 0 )
UpperCamelCase__ = outputs.decoder_attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(snake_case_ ):
UpperCamelCase__ = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
UpperCamelCase__ = True
UpperCamelCase__ = False
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
UpperCamelCase__ = len(snake_case_ )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
if self.is_encoder_decoder:
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_decoder_attentions_output(snake_case_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
UpperCamelCase__ = True
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
# Check attention is always last and order is fine
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(snake_case_ ) )
self.assertEqual(model.config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
@require_tf
class __lowerCamelCase ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
UpperCamelCase__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCamelCase__ = model(snake_case_ )[0]
UpperCamelCase__ = [1, 6, 768]
self.assertEqual(output.shape , snake_case_ )
UpperCamelCase__ = tf.constant(
[
[
[-0.03_475_493, -0.4_686_034, -0.30_638_832],
[0.22_637_248, -0.26_988_646, -0.7_423_424],
[0.10_324_868, -0.45_013_508, -0.58_280_784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , snake_case_ , atol=1E-4 )
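# --------------------------------------------------------------------------
# Hedged companion sketch (not part of the test suite): a minimal end-to-end
# forward pass with the same checkpoint the integration test uses. It assumes
# network access to download 'YituTech/conv-bert-base' and a working
# TensorFlow install; pairing the model with AutoTokenizer is an assumption.
def convbert_last_hidden_state(text):
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained('YituTech/conv-bert-base')
    model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
    inputs = tokenizer(text, return_tensors='tf')  # input_ids, attention_mask, token_type_ids
    outputs = model(inputs)
    return outputs.last_hidden_state  # shape: (batch_size, seq_len, hidden_size)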
| 20 | 1 |
"""simple docstring"""
from __future__ import annotations
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> bool:
"""simple docstring"""
    # 1) Construct the failure array for the pattern
    UpperCamelCase__ = get_failure_array(SCREAMING_SNAKE_CASE )
    # 2) Step through text searching for pattern
UpperCamelCase__ , UpperCamelCase__ = 0, 0 # index into text, pattern
while i < len(SCREAMING_SNAKE_CASE ):
if pattern[j] == text[i]:
if j == (len(SCREAMING_SNAKE_CASE ) - 1):
return True
j += 1
# if this is a prefix in our pattern
# just go back far enough to continue
elif j > 0:
UpperCamelCase__ = failure[j - 1]
continue
i += 1
return False
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> list[int]:
"""simple docstring"""
UpperCamelCase__ = [0]
UpperCamelCase__ = 0
UpperCamelCase__ = 1
while j < len(SCREAMING_SNAKE_CASE ):
if pattern[i] == pattern[j]:
i += 1
elif i > 0:
UpperCamelCase__ = failure[i - 1]
continue
j += 1
failure.append(SCREAMING_SNAKE_CASE )
return failure
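# Hedged extension (additive sketch, reusing get_failure_array above): the same
# failure array can report every occurrence index rather than a boolean. On a
# full match we record the start position and fall back through the failure
# array so overlapping matches are still found.
def kmp_all_occurrences(pattern: str, text: str) -> list[int]:
    failure = get_failure_array(pattern)
    positions = []
    i = j = 0
    while i < len(text):
        if pattern[j] == text[i]:
            if j == len(pattern) - 1:
                positions.append(i - j)  # full match starting at i - j
                j = failure[j]  # reuse the matched prefix and keep scanning
                i += 1
                continue
            j += 1
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return positions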
if __name__ == "__main__":
# Test 1)
A__ : Dict= """abc1abc12"""
A__ : int= """alskfjaldsabc1abc1abc12k23adsfabcabc"""
A__ : Any= """alskfjaldsk23adsfabcabc"""
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
A__ : Optional[Any]= """ABABX"""
A__ : Union[str, Any]= """ABABZABABYABABX"""
assert kmp(pattern, text)
# Test 3)
A__ : List[str]= """AAAB"""
A__ : Tuple= """ABAAAAAB"""
assert kmp(pattern, text)
# Test 4)
A__ : Any= """abcdabcy"""
A__ : Dict= """abcxabcdabxabcdabcdabcy"""
assert kmp(pattern, text)
# Test 5)
A__ : str= """aabaabaaa"""
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 20 |
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def lowerCAmelCase_( SCREAMING_SNAKE_CASE = 1_00_00_00 , SCREAMING_SNAKE_CASE = 10 ) -> int:
"""simple docstring"""
UpperCamelCase__ = defaultdict(SCREAMING_SNAKE_CASE )
for outer_width in range(3 , (t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
UpperCamelCase__ = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
else:
UpperCamelCase__ = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(SCREAMING_SNAKE_CASE , outer_width - 1 , 2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
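# Hedged cross-check (additive sketch): a square lamina with outer side a and
# hole side b (same parity, b >= 1) uses a*a - b*b tiles, which is exactly what
# the defaultdict above tallies. A direct brute force over small sides can be
# used to sanity-check the counting logic at a reduced tile limit.
def brute_force_lamina_counts(t_limit):
    counts = defaultdict(int)
    for outer in range(3, t_limit // 4 + 2):
        hole = outer - 2  # largest hole of matching parity -> fewest tiles
        while hole >= 1:
            tiles = outer * outer - hole * hole
            if tiles > t_limit:
                break  # shrinking the hole only adds more tiles
            counts[tiles] += 1
            hole -= 2
    return sum(1 for n in counts.values() if 1 <= n <= 10)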
if __name__ == "__main__":
print(F"""{solution() = }""")
| 20 | 1 |
"""simple docstring"""
from __future__ import annotations
A__ : Tuple= """#"""
class __lowerCamelCase :
def __init__( self ) -> None:
UpperCamelCase__ = {}
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> None:
UpperCamelCase__ = self._trie
for char in text:
if char not in trie:
UpperCamelCase__ = {}
UpperCamelCase__ = trie[char]
        UpperCamelCase__ = True  # terminate the word with the END sentinel key
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> tuple | list:
UpperCamelCase__ = self._trie
for char in prefix:
if char in trie:
UpperCamelCase__ = trie[char]
else:
return []
return self._elements(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> tuple:
UpperCamelCase__ = []
for c, v in d.items():
UpperCamelCase__ = [' '] if c == END else [(c + s) for s in self._elements(snake_case_ )]
result.extend(snake_case_ )
return tuple(snake_case_ )
A__ : Union[str, Any]= Trie()
A__ : Any= ("""depart""", """detergent""", """daring""", """dog""", """deer""", """deal""")
for word in words:
trie.insert_word(word)
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> tuple:
"""simple docstring"""
UpperCamelCase__ = trie.find_word(SCREAMING_SNAKE_CASE )
return tuple(string + word for word in suffixes )
def lowerCAmelCase_( ) -> None:
"""simple docstring"""
print(autocomplete_using_trie('de' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
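# Hedged usage sketch (additive): membership testing on the same trie. A word
# is stored iff walking its characters ends at a node containing the END
# sentinel; Trie, insert_word and the module-level END marker are the ones
# defined above.
def contains_word(trie_instance, word):
    node = trie_instance._trie
    for char in word:
        if char not in node:
            return False
        node = node[char]
    return END in node

demo_trie = Trie()
demo_trie.insert_word('deal')
assert contains_word(demo_trie, 'deal')
assert not contains_word(demo_trie, 'dea')  # prefix only, not a stored word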
| 20 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class __lowerCamelCase ( unittest.TestCase ):
def __init__( self , snake_case_ , snake_case_=100 , snake_case_=13 , snake_case_=30 , snake_case_=2 , snake_case_=3 , snake_case_=True , snake_case_=True , snake_case_=32 , snake_case_=5 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=10 , snake_case_=0.02 , snake_case_=3 , ) -> Optional[int]:
UpperCamelCase__ = parent
UpperCamelCase__ = vocab_size
UpperCamelCase__ = batch_size
UpperCamelCase__ = image_size
UpperCamelCase__ = patch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = is_training
UpperCamelCase__ = use_labels
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = type_sequence_label_size
UpperCamelCase__ = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase__ = (image_size // patch_size) ** 2
UpperCamelCase__ = num_patches + 1
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ = BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case_ , initializer_range=self.initializer_range , )
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> List[Any]:
UpperCamelCase__ = FlaxBeitModel(config=snake_case_ )
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> List[Any]:
UpperCamelCase__ = FlaxBeitForMaskedImageModeling(config=snake_case_ )
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> List[str]:
UpperCamelCase__ = self.type_sequence_label_size
UpperCamelCase__ = FlaxBeitForImageClassification(config=snake_case_ )
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase__ = 1
UpperCamelCase__ = FlaxBeitForImageClassification(snake_case_ )
UpperCamelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase__ = model(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.prepare_config_and_inputs()
        UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = config_and_inputs
UpperCamelCase__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class __lowerCamelCase ( _a , unittest.TestCase ):
a : int =(
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
def SCREAMING_SNAKE_CASE__ ( self ) -> None:
UpperCamelCase__ = FlaxBeitModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ = [*signature.parameters.keys()]
UpperCamelCase__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCamelCase__ = self._prepare_for_class(snake_case_ , snake_case_ )
UpperCamelCase__ = model_class(snake_case_ )
@jax.jit
def model_jitted(snake_case_ , **snake_case_ ):
return model(pixel_values=snake_case_ , **snake_case_ )
with self.subTest('JIT Enabled' ):
UpperCamelCase__ = model_jitted(**snake_case_ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
UpperCamelCase__ = model_jitted(**snake_case_ ).to_tuple()
self.assertEqual(len(snake_case_ ) , len(snake_case_ ) )
for jitted_output, output in zip(snake_case_ , snake_case_ ):
self.assertEqual(jitted_output.shape , output.shape )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
for model_class_name in self.all_model_classes:
UpperCamelCase__ = model_class_name.from_pretrained('microsoft/beit-base-patch16-224' )
UpperCamelCase__ = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(snake_case_ )
def lowerCAmelCase_( ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = FlaxBeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=snake_case_ , return_tensors='np' ).pixel_values
# prepare bool_masked_pos
UpperCamelCase__ = np.ones((1, 196) , dtype=snake_case_ )
# forward pass
UpperCamelCase__ = model(pixel_values=snake_case_ , bool_masked_pos=snake_case_ )
UpperCamelCase__ = outputs.logits
# verify the logits
UpperCamelCase__ = (1, 196, 8192)
self.assertEqual(logits.shape , snake_case_ )
UpperCamelCase__ = np.array(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , snake_case_ , atol=1E-2 ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=snake_case_ , return_tensors='np' )
# forward pass
UpperCamelCase__ = model(**snake_case_ )
UpperCamelCase__ = outputs.logits
# verify the logits
UpperCamelCase__ = (1, 1000)
self.assertEqual(logits.shape , snake_case_ )
UpperCamelCase__ = np.array([-1.2_385, -1.0_987, -1.0_108] )
self.assertTrue(np.allclose(logits[0, :3] , snake_case_ , atol=1E-4 ) )
UpperCamelCase__ = 281
self.assertEqual(logits.argmax(-1 ).item() , snake_case_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=snake_case_ , return_tensors='np' )
# forward pass
UpperCamelCase__ = model(**snake_case_ )
UpperCamelCase__ = outputs.logits
# verify the logits
UpperCamelCase__ = (1, 2_1841)
self.assertEqual(logits.shape , snake_case_ )
UpperCamelCase__ = np.array([1.6_881, -0.2_787, 0.5_901] )
self.assertTrue(np.allclose(logits[0, :3] , snake_case_ , atol=1E-4 ) )
UpperCamelCase__ = 2396
self.assertEqual(logits.argmax(-1 ).item() , snake_case_ )
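# Hedged companion sketch (not part of the tests): single-image classification
# with the same Flax BEiT checkpoint the slow test exercises. Assumes network
# access plus working flax, Pillow and requests installs; illustrative only.
def classify_image(url):
    import requests

    image = Image.open(requests.get(url, stream=True).raw)
    processor = BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224')
    model = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224')
    inputs = processor(images=image, return_tensors='np')
    logits = model(**inputs).logits  # (1, 1000)
    return model.config.id2label[logits.argmax(-1).item()]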
| 20 | 1 |
"""simple docstring"""
from __future__ import annotations
from PIL import Image
# Define glider example
A__ : Dict= [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
A__ : Dict= [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> list[list[int]]:
"""simple docstring"""
UpperCamelCase__ = []
for i in range(len(SCREAMING_SNAKE_CASE ) ):
UpperCamelCase__ = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
UpperCamelCase__ = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(SCREAMING_SNAKE_CASE ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(SCREAMING_SNAKE_CASE ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(SCREAMING_SNAKE_CASE ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
UpperCamelCase__ = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (
                not alive and neighbour_count == 3
            ):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(SCREAMING_SNAKE_CASE )
return next_generation
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> list[Image.Image]:
"""simple docstring"""
UpperCamelCase__ = []
for _ in range(SCREAMING_SNAKE_CASE ):
# Create output image
UpperCamelCase__ = Image.new('RGB' , (len(cells[0] ), len(SCREAMING_SNAKE_CASE )) )
UpperCamelCase__ = img.load()
# Save cells to image
for x in range(len(SCREAMING_SNAKE_CASE ) ):
for y in range(len(cells[0] ) ):
UpperCamelCase__ = 2_55 - cells[y][x] * 2_55
UpperCamelCase__ = (colour, colour, colour)
# Save image
images.append(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = new_generation(SCREAMING_SNAKE_CASE )
return images
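# Hedged alternative (additive sketch): the per-cell neighbour loop above can
# be expressed as a single 2-D convolution. This assumes numpy and scipy are
# available; it is an illustration, not a replacement for the function above.
def new_generation_conv(cells):
    import numpy as np
    from scipy.signal import convolve2d

    grid = np.array(cells, dtype=int)
    kernel = np.ones((3, 3), dtype=int)
    kernel[1, 1] = 0  # a cell is not its own neighbour
    neighbours = convolve2d(grid, kernel, mode='same', boundary='fill', fillvalue=0)
    survive = (grid == 1) & ((neighbours == 2) | (neighbours == 3))
    born = (grid == 0) & (neighbours == 3)
    return (survive | born).astype(int).tolist()

# A blinker oscillates between a vertical and a horizontal bar:
assert new_generation_conv([[0, 1, 0], [0, 1, 0], [0, 1, 0]]) == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]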
if __name__ == "__main__":
A__ : int= generate_images(GLIDER, 16)
images[0].save("""out.gif""", save_all=True, append_images=images[1:])
| 20 |
"""simple docstring"""
import sys
from collections import defaultdict
class __lowerCamelCase :
def __init__( self ) -> Tuple:
UpperCamelCase__ = []
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> List[str]:
return self.node_position[vertex]
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> Optional[int]:
UpperCamelCase__ = pos
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[Any]:
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
UpperCamelCase__ = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
UpperCamelCase__ = 2 * start + 1
else:
UpperCamelCase__ = 2 * start + 2
if heap[smallest_child] < heap[start]:
UpperCamelCase__ , UpperCamelCase__ = heap[smallest_child], positions[smallest_child]
UpperCamelCase__ , UpperCamelCase__ = (
heap[start],
positions[start],
)
UpperCamelCase__ , UpperCamelCase__ = temp, tempa
UpperCamelCase__ = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , snake_case_ )
self.top_to_bottom(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Any:
UpperCamelCase__ = position[index]
while index != 0:
UpperCamelCase__ = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
UpperCamelCase__ = heap[parent]
UpperCamelCase__ = position[parent]
self.set_position(position[parent] , snake_case_ )
else:
UpperCamelCase__ = val
UpperCamelCase__ = temp
self.set_position(snake_case_ , snake_case_ )
break
UpperCamelCase__ = parent
else:
UpperCamelCase__ = val
UpperCamelCase__ = temp
self.set_position(snake_case_ , 0 )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> Any:
UpperCamelCase__ = len(snake_case_ ) // 2 - 1
for i in range(snake_case_ , -1 , -1 ):
self.top_to_bottom(snake_case_ , snake_case_ , len(snake_case_ ) , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> List[Any]:
UpperCamelCase__ = positions[0]
UpperCamelCase__ = sys.maxsize
self.top_to_bottom(snake_case_ , 0 , len(snake_case_ ) , snake_case_ )
return temp
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ = Heap()
UpperCamelCase__ = [0] * len(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = [-1] * len(SCREAMING_SNAKE_CASE ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
UpperCamelCase__ = [] # Heap of Distance of vertices from their neighboring vertex
UpperCamelCase__ = []
for vertex in range(len(SCREAMING_SNAKE_CASE ) ):
distance_tv.append(sys.maxsize )
positions.append(SCREAMING_SNAKE_CASE )
heap.node_position.append(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = []
UpperCamelCase__ = 1
UpperCamelCase__ = sys.maxsize
for neighbor, distance in adjacency_list[0]:
UpperCamelCase__ = 0
UpperCamelCase__ = distance
heap.heapify(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for _ in range(1 , len(SCREAMING_SNAKE_CASE ) ):
UpperCamelCase__ = heap.delete_minimum(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
UpperCamelCase__ = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(SCREAMING_SNAKE_CASE )]
):
UpperCamelCase__ = distance
heap.bottom_to_top(
SCREAMING_SNAKE_CASE , heap.get_position(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase__ = vertex
return tree_edges
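# Hedged cross-check (additive sketch): a compact Prim's built on the standard
# library's heapq, consuming the same adjacency-list format used above
# ({vertex: [[neighbour, weight], ...]}). It returns the MST edge list.
def prisms_algorithm_heapq(adjacency_list, start=0):
    import heapq

    visited = {start}
    frontier = [(weight, start, nbr) for nbr, weight in adjacency_list[start]]
    heapq.heapify(frontier)
    tree_edges = []
    while frontier and len(visited) < len(adjacency_list):
        weight, u, v = heapq.heappop(frontier)
        if v in visited:
            continue  # stale entry; v was reached by a cheaper edge already
        visited.add(v)
        tree_edges.append((u, v))
        for nbr, nbr_weight in adjacency_list[v]:
            if nbr not in visited:
                heapq.heappush(frontier, (nbr_weight, v, nbr))
    return tree_edges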
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
A__ : Dict= int(input("""Enter number of edges: """).strip())
A__ : Dict= defaultdict(list)
for _ in range(edges_number):
A__ : Dict= [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 20 | 1 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
A__ : Tuple= subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""")
A__ : Optional[Any]= subprocess.check_output(F"""git diff --name-only {fork_point_sha}""".split()).decode("""utf-8""").split()
A__ : Optional[Any]= """|""".join(sys.argv[1:])
A__ : int= re.compile(rF"""^({joined_dirs}).*?\.py$""")
A__ : Tuple= [x for x in modified_files if regex.match(x)]
print(""" """.join(relevant_modified_files), end="""""")
| 20 |
"""simple docstring"""
from copy import deepcopy
class __lowerCamelCase :
def __init__( self , snake_case_ = None , snake_case_ = None ) -> None:
if arr is None and size is not None:
UpperCamelCase__ = size
UpperCamelCase__ = [0] * size
elif arr is not None:
self.init(snake_case_ )
else:
raise ValueError('Either arr or size must be specified' )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> None:
UpperCamelCase__ = len(snake_case_ )
UpperCamelCase__ = deepcopy(snake_case_ )
for i in range(1 , self.size ):
UpperCamelCase__ = self.next_(snake_case_ )
if j < self.size:
self.tree[j] += self.tree[i]
def SCREAMING_SNAKE_CASE__ ( self ) -> list[int]:
UpperCamelCase__ = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
UpperCamelCase__ = self.next_(snake_case_ )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> int:
return index + (index & (-index))
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> int:
return index - (index & (-index))
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> None:
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
UpperCamelCase__ = self.next_(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> None:
self.add(snake_case_ , value - self.get(snake_case_ ) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> int:
if right == 0:
return 0
UpperCamelCase__ = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
UpperCamelCase__ = self.prev(snake_case_ )
return result
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> int:
return self.prefix(snake_case_ ) - self.prefix(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> int:
return self.query(snake_case_ , index + 1 )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> int:
value -= self.tree[0]
if value < 0:
return -1
UpperCamelCase__ = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
UpperCamelCase__ = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
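# Hedged usage sketch (additive): exercising the Fenwick (binary indexed) tree
# above. The class name FenwickTree is an assumption, since the definition is
# anonymised; the method names add/prefix/query/rank_query are taken from its
# internal call sites.
def _fenwick_demo():
    f = FenwickTree([1, 3, 0, 5, 2])
    assert f.prefix(3) == 1 + 3 + 0  # sum of arr[0:3]
    f.add(2, 4)  # arr[2] += 4  ->  [1, 3, 4, 5, 2]
    assert f.query(1, 4) == 3 + 4 + 5  # half-open range sum of arr[1:4]
    assert f.rank_query(8) == 2  # largest i with arr[0] + ... + arr[i] <= 8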
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 | 1 |
"""simple docstring"""
import math
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(SCREAMING_SNAKE_CASE ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowerCAmelCase_( SCREAMING_SNAKE_CASE = 0.1 ) -> int:
"""simple docstring"""
UpperCamelCase__ = 3
UpperCamelCase__ = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(SCREAMING_SNAKE_CASE )
j += 2
return j
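# Hedged companion sketch (additive): tracing how the prime ratio along the
# spiral diagonals decays. Each new layer of side j contributes the corners
# j*j - k*(j - 1) for k in 1..3 (j*j itself is an odd square, never prime),
# matching the range(...) used in the solver above.
def diagonal_prime_ratio_trace(layers):
    primes, j = 0, 1
    for _ in range(layers):
        j += 2
        for k in range(1, 4):
            primes += is_prime(j * j - k * (j - 1))
        print(f'side {j}: {primes}/{2 * j - 1} = {primes / (2 * j - 1):.3f}')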
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 |
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
A__ : Union[str, Any]= logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""" )
@require_torch
@require_tf
@slow
class __lowerCamelCase ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = True , ) -> Tuple:
UpperCamelCase__ = [file for file in os.listdir(snake_case_ ) if os.path.isfile(os.path.join(snake_case_ , snake_case_ ) )]
if identifier is not None:
UpperCamelCase__ = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(snake_case_ , snake_case_ ):
for n_ in n_identifier:
UpperCamelCase__ = [file for file in files if n_ not in file]
else:
UpperCamelCase__ = [file for file in files if n_identifier not in file]
UpperCamelCase__ = ignore_files or []
ignore_files.append('__init__.py' )
UpperCamelCase__ = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('Testing' , snake_case_ )
if only_modules:
UpperCamelCase__ = file.split('.' )[0]
try:
UpperCamelCase__ = getattr(snake_case_ , snake_case_ )
UpperCamelCase__ = doctest.DocTestSuite(snake_case_ )
UpperCamelCase__ = unittest.TextTestRunner().run(snake_case_ )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(F'{module_identifier} is not a module.' )
else:
UpperCamelCase__ = doctest.testfile(str('..' / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = Path('src/transformers' )
UpperCamelCase__ = 'modeling'
UpperCamelCase__ = [
'modeling_ctrl.py',
'modeling_tf_ctrl.py',
]
self.analyze_directory(snake_case_ , identifier=snake_case_ , ignore_files=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = Path('src/transformers' )
UpperCamelCase__ = 'tokenization'
self.analyze_directory(snake_case_ , identifier=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
UpperCamelCase__ = Path('src/transformers' )
UpperCamelCase__ = 'configuration'
self.analyze_directory(snake_case_ , identifier=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = Path('src/transformers' )
UpperCamelCase__ = ['configuration', 'modeling', 'tokenization']
self.analyze_directory(snake_case_ , n_identifier=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = Path('docs/source' )
UpperCamelCase__ = ['favicon.ico']
self.analyze_directory(snake_case_ , ignore_files=snake_case_ , only_modules=snake_case_ )
| 20 | 1 |
"""simple docstring"""
from __future__ import annotations
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> list[int]:
"""simple docstring"""
UpperCamelCase__ = 2
UpperCamelCase__ = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(SCREAMING_SNAKE_CASE )
if n > 1:
factors.append(SCREAMING_SNAKE_CASE )
return factors
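# Hedged usage sketch (additive): grouping the factor list into multiplicities.
# The call name prime_factors is an assumption (the definition above is
# anonymised), but it returns the prime factors with repetition, e.g.
# prime_factors(360) -> [2, 2, 2, 3, 3, 5], so:
def factorization(n: int) -> dict[int, int]:
    from collections import Counter

    return dict(Counter(prime_factors(n)))

# 360 = 2**3 * 3**2 * 5, hence factorization(360) == {2: 3, 3: 2, 5: 1}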
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 |
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ : str= logging.get_logger(__name__)
A__ : List[Any]= {
"""nvidia/segformer-b0-finetuned-ade-512-512""": (
"""https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"""
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class __lowerCamelCase ( _a ):
a : Any ="""segformer"""
def __init__( self , snake_case_=3 , snake_case_=4 , snake_case_=[2, 2, 2, 2] , snake_case_=[8, 4, 2, 1] , snake_case_=[32, 64, 160, 256] , snake_case_=[7, 3, 3, 3] , snake_case_=[4, 2, 2, 2] , snake_case_=[1, 2, 5, 8] , snake_case_=[4, 4, 4, 4] , snake_case_="gelu" , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.1 , snake_case_=0.02 , snake_case_=0.1 , snake_case_=1E-6 , snake_case_=256 , snake_case_=255 , **snake_case_ , ) -> Tuple:
super().__init__(**snake_case_ )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
' removed, as the behaviour will default to that of reshape_last_stage = True.' , snake_case_ , )
UpperCamelCase__ = num_channels
UpperCamelCase__ = num_encoder_blocks
UpperCamelCase__ = depths
UpperCamelCase__ = sr_ratios
UpperCamelCase__ = hidden_sizes
UpperCamelCase__ = patch_sizes
UpperCamelCase__ = strides
UpperCamelCase__ = mlp_ratios
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = classifier_dropout_prob
UpperCamelCase__ = initializer_range
UpperCamelCase__ = drop_path_rate
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = decoder_hidden_size
UpperCamelCase__ = kwargs.get('reshape_last_stage' , snake_case_ )
UpperCamelCase__ = semantic_loss_ignore_index
class __lowerCamelCase ( _a ):
a : Any =version.parse("""1.11""" )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> float:
return 1E-4
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
return 12
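# Hedged usage sketch (additive): instantiating the config with its defaults
# and reading the ONNX export spec. The names SegformerConfig and
# SegformerOnnxConfig follow the library's usual naming convention and are an
# assumption here, since the class definitions above are anonymised.
def _segformer_config_demo():
    config = SegformerConfig()
    assert config.num_encoder_blocks == 4
    assert config.hidden_sizes == [32, 64, 160, 256]
    onnx_config = SegformerOnnxConfig(config)
    assert onnx_config.inputs['pixel_values'] == {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}
    assert onnx_config.default_onnx_opset == 12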
| 20 | 1 |
"""simple docstring"""
import requests
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> None:
"""simple docstring"""
UpperCamelCase__ = {'Content-Type': 'application/json'}
UpperCamelCase__ = requests.post(SCREAMING_SNAKE_CASE , json={'text': message_body} , headers=SCREAMING_SNAKE_CASE )
if response.status_code != 2_00:
UpperCamelCase__ = (
'Request to slack returned an error '
F'{response.status_code}, the response is:\n{response.text}'
)
raise ValueError(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("""<YOUR MESSAGE BODY>""", """<SLACK CHANNEL URL>""")
| 20 |
"""simple docstring"""
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class __lowerCamelCase ( _a ):
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Union[str, Any]:
UpperCamelCase__ = parser.add_parser('download' )
download_parser.add_argument(
'--cache-dir' , type=snake_case_ , default=snake_case_ , help='Path to location to store the models' )
download_parser.add_argument(
'--force' , action='store_true' , help='Force the model to be download even if already in cache-dir' )
download_parser.add_argument(
'--trust-remote-code' , action='store_true' , help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' , )
download_parser.add_argument('model' , type=snake_case_ , help='Name of the model to download' )
download_parser.set_defaults(func=snake_case_ )
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> str:
UpperCamelCase__ = model
UpperCamelCase__ = cache
UpperCamelCase__ = force
UpperCamelCase__ = trust_remote_code
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
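# Hedged usage note (additive): this class registers as the `download`
# subcommand of the transformers CLI, so a typical invocation is
#
#   transformers-cli download --cache-dir /tmp/models bert-base-uncased
#
# which mirrors what the command executes programmatically:
def download_example():
    from transformers import AutoModel, AutoTokenizer

    AutoModel.from_pretrained('bert-base-uncased', cache_dir='/tmp/models')
    AutoTokenizer.from_pretrained('bert-base-uncased', cache_dir='/tmp/models')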
| 20 | 1 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
UpperCamelCase__ = XLMRobertaModel.from_pretrained('xlm-roberta-base' )
UpperCamelCase__ = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]] )
# The dog is cute and lives in the garden house
UpperCamelCase__ = torch.Size((1, 12, 768) ) # batch_size, sequence_length, embedding_vector_dim
UpperCamelCase__ = torch.tensor(
[[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCamelCase__ = model(snake_case_ )['last_hidden_state'].detach()
self.assertEqual(output.shape , snake_case_ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , snake_case_ , atol=1E-3 ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
UpperCamelCase__ = XLMRobertaModel.from_pretrained('xlm-roberta-large' )
UpperCamelCase__ = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]] )
# The dog is cute and lives in the garden house
UpperCamelCase__ = torch.Size((1, 12, 1024) ) # batch_size, sequence_length, embedding_vector_dim
UpperCamelCase__ = torch.tensor(
[[-0.0_699, -0.0_318, 0.0_705, -0.1_241, 0.0_999, -0.0_520, 0.1_004, -0.1_838, -0.4_704, 0.1_437, 0.0_821, 0.0_126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCamelCase__ = model(snake_case_ )['last_hidden_state'].detach()
self.assertEqual(output.shape , snake_case_ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , snake_case_ , atol=1E-3 ) )
| 20 |
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCamelCase ( _a ):
def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=5 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=False , snake_case_=True , snake_case_="None" , snake_case_=3 , snake_case_=4 , snake_case_=None , ) -> str:
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = seq_length
UpperCamelCase__ = is_training
UpperCamelCase__ = use_input_mask
UpperCamelCase__ = use_token_type_ids
UpperCamelCase__ = use_labels
UpperCamelCase__ = vocab_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = type_vocab_size
UpperCamelCase__ = type_sequence_label_size
UpperCamelCase__ = initializer_range
UpperCamelCase__ = num_labels
UpperCamelCase__ = num_choices
UpperCamelCase__ = relative_attention
UpperCamelCase__ = position_biased_input
UpperCamelCase__ = pos_att_type
UpperCamelCase__ = scope
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ = None
if self.use_input_mask:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
UpperCamelCase__ = None
if self.use_token_type_ids:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Any:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> str:
UpperCamelCase__ = DebertaVaModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ )[0]
UpperCamelCase__ = model(snake_case_ , token_type_ids=snake_case_ )[0]
UpperCamelCase__ = model(snake_case_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Optional[Any]:
UpperCamelCase__ = DebertaVaForMaskedLM(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Dict:
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = DebertaVaForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Optional[int]:
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = DebertaVaForTokenClassification(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Tuple:
UpperCamelCase__ = DebertaVaForQuestionAnswering(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int:
UpperCamelCase__ = DebertaVaForMultipleChoice(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = self.prepare_config_and_inputs()
        UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = config_and_inputs
UpperCamelCase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( _a , _a , unittest.TestCase ):
a : Any =(
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
a : Dict =(
{
"""feature-extraction""": DebertaVaModel,
"""fill-mask""": DebertaVaForMaskedLM,
"""question-answering""": DebertaVaForQuestionAnswering,
"""text-classification""": DebertaVaForSequenceClassification,
"""token-classification""": DebertaVaForTokenClassification,
"""zero-shot""": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
a : Tuple =True
a : Union[str, Any] =False
a : Tuple =False
a : Union[str, Any] =False
a : Dict =False
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
UpperCamelCase__ = DebertaVaModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=snake_case_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*snake_case_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ = DebertaVaModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( unittest.TestCase ):
@unittest.skip(reason='Model not available yet' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
pass
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
UpperCamelCase__ = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge' )
UpperCamelCase__ = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
UpperCamelCase__ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ )[0]
# compare the actual values for a slice.
UpperCamelCase__ = torch.tensor(
[[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , snake_case_ , atol=1E-4 ) , F'{output[:, 1:4, 1:4]}' )
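# Hedged companion sketch (not part of the tests): a feature-extraction pass
# through the same checkpoint as the integration test, this time via a
# tokenizer. Assumes network access; the AutoTokenizer pairing is an
# assumption.
def deberta_features(text):
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained('microsoft/deberta-v2-xlarge')
    model = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge')
    inputs = tokenizer(text, return_tensors='pt')
    with torch.no_grad():
        hidden = model(**inputs).last_hidden_state  # (1, seq_len, 1536)
    return hidden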
| 20 | 1 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCamelCase ( _a ):
a : str =["""image_processor""", """tokenizer"""]
a : List[str] ="""ViltImageProcessor"""
a : Union[str, Any] =("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self , snake_case_=None , snake_case_=None , **snake_case_ ) -> Any:
UpperCamelCase__ = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , snake_case_ , )
UpperCamelCase__ = kwargs.pop('feature_extractor' )
UpperCamelCase__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(snake_case_ , snake_case_ )
UpperCamelCase__ = self.image_processor
def __call__( self , snake_case_ , snake_case_ = None , snake_case_ = True , snake_case_ = False , snake_case_ = None , snake_case_ = None , snake_case_ = 0 , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = False , snake_case_ = False , snake_case_ = False , snake_case_ = False , snake_case_ = True , snake_case_ = None , **snake_case_ , ) -> BatchEncoding:
UpperCamelCase__ = self.tokenizer(
text=snake_case_ , add_special_tokens=snake_case_ , padding=snake_case_ , truncation=snake_case_ , max_length=snake_case_ , stride=snake_case_ , pad_to_multiple_of=snake_case_ , return_token_type_ids=snake_case_ , return_attention_mask=snake_case_ , return_overflowing_tokens=snake_case_ , return_special_tokens_mask=snake_case_ , return_offsets_mapping=snake_case_ , return_length=snake_case_ , verbose=snake_case_ , return_tensors=snake_case_ , **snake_case_ , )
# add pixel_values + pixel_mask
UpperCamelCase__ = self.image_processor(snake_case_ , return_tensors=snake_case_ )
encoding.update(snake_case_ )
return encoding
def SCREAMING_SNAKE_CASE__ ( self , *snake_case_ , **snake_case_ ) -> Tuple:
return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , *snake_case_ , **snake_case_ ) -> Optional[Any]:
return self.tokenizer.decode(*snake_case_ , **snake_case_ )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
UpperCamelCase__ = self.tokenizer.model_input_names
UpperCamelCase__ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , snake_case_ , )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , snake_case_ , )
return self.image_processor
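# Hedged usage sketch (additive): the processor above bundles image and text
# preprocessing for ViLT-style visual question answering. The checkpoint and
# head class follow the public ViLT VQA release; assumes network access and a
# working torch install.
def vilt_vqa(image, question):
    from transformers import ViltForQuestionAnswering, ViltProcessor

    processor = ViltProcessor.from_pretrained('dandelin/vilt-b32-finetuned-vqa')
    model = ViltForQuestionAnswering.from_pretrained('dandelin/vilt-b32-finetuned-vqa')
    encoding = processor(image, question, return_tensors='pt')  # pixel_values, pixel_mask, input_ids, ...
    logits = model(**encoding).logits
    return model.config.id2label[logits.argmax(-1).item()]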
| 20 |
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config( swin_name ) -> SwinConfig:
    """simple docstring"""
    config = SwinConfig()
    name_split = swin_name.split('_' )
    model_size = name_split[1]
    img_size = int(name_split[4] )
    window_size = int(name_split[3][-1] )
    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 1_28
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 1_92
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    if "in22k" in swin_name:
        num_classes = 2_18_41
    else:
        num_classes = 10_00
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size
    return config
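# Example (sketch, derived from the parsing above): for swin_name
# "swin_base_patch4_window7_224" the underscore split yields model_size="base",
# window_size=7 and img_size=224, so the returned config carries embed_dim=128,
# depths=(2, 2, 18, 2) and num_heads=(4, 8, 16, 32).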
def rename_key( name ) -> str:
    """simple docstring"""
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm' , 'embeddings.norm' )
    if "layers" in name:
        name = 'encoder.' + name
    if "attn.proj" in name:
        name = name.replace('attn.proj' , 'attention.output.dense' )
    if "attn" in name:
        name = name.replace('attn' , 'attention.self' )
    if "norm1" in name:
        name = name.replace('norm1' , 'layernorm_before' )
    if "norm2" in name:
        name = name.replace('norm2' , 'layernorm_after' )
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1' , 'intermediate.dense' )
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2' , 'output.dense' )
    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'
    if "head" in name:
        name = name.replace('head' , 'classifier' )
    else:
        name = 'swin.' + name
    return name
def convert_state_dict( orig_state_dict , model ) -> dict:
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split('.' )
            layer_num = int(key_split[1] )
            block_num = int(key_split[3] )
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[F'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight'] = val[:dim, :]
                orig_state_dict[F'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight'] = val[dim : dim * 2, :]
                orig_state_dict[F'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight'] = val[-dim:, :]
            else:
                orig_state_dict[F'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias'] = val[:dim]
                orig_state_dict[F'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias'] = val[dim : dim * 2]
                orig_state_dict[F'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias'] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
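# Sketch of the qkv split above: timm stores query/key/value as one fused
# projection of shape (3 * dim, dim); slicing val[:dim], val[dim : dim * 2] and
# val[-dim:] recovers the separate q/k/v tensors that the HF Swin attention
# module expects.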
def convert_swin_checkpoint( swin_name , pytorch_dump_folder_path ) -> None:
    """simple docstring"""
    timm_model = timm.create_model(swin_name , pretrained=True )
    timm_model.eval()
    config = get_swin_config(swin_name )
    model = SwinForImageClassification(config )
    model.eval()
    new_state_dict = convert_state_dict(timm_model.state_dict() , model )
    model.load_state_dict(new_state_dict )
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image_processor = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-' ) ) )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = image_processor(images=image , return_tensors='pt' )
    timm_outs = timm_model(inputs['pixel_values'] )
    hf_outs = model(**inputs ).logits
    assert torch.allclose(timm_outs , hf_outs , atol=1E-3 )
    print(F'Saving model {swin_name} to {pytorch_dump_folder_path}' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 20 | 1 |
"""simple docstring"""
values = {
0: """0""",
1: """1""",
2: """2""",
3: """3""",
4: """4""",
5: """5""",
6: """6""",
7: """7""",
8: """8""",
9: """9""",
10: """a""",
11: """b""",
12: """c""",
13: """d""",
14: """e""",
15: """f""",
}
def decimal_to_hexadecimal( decimal ) -> str:
    """simple docstring"""
    assert type(decimal ) in (int, float) and decimal == int(decimal )
    decimal = int(decimal )
    hexadecimal = ''
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal , remainder = divmod(decimal , 16 )
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = '0x' + hexadecimal
    if negative:
        hexadecimal = '-' + hexadecimal
    return hexadecimal
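# Usage sketch: decimal_to_hexadecimal(26) -> '0x1a' and
# decimal_to_hexadecimal(-256) -> '-0x100', mirroring Python's built-in hex()
# for integer inputs.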
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 |
"""simple docstring"""
def is_pentagonal( n ) -> bool:
    """simple docstring"""
    # P = m(3m - 1) / 2 inverts to m = (1 + sqrt(1 + 24P)) / 6, so n is
    # pentagonal exactly when that expression is a positive integer.
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0
def solution( limit = 50_00 ) -> int:
    """simple docstring"""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1 , limit )]
    for i, pentagonal_i in enumerate(pentagonal_nums ):
        for j in range(i , len(pentagonal_nums ) ):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a ) and is_pentagonal(b ):
                return b
    return -1
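# Sanity check (Project Euler 44): with the default limit of 5000 this search
# returns the minimised difference D = 5482660.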
if __name__ == "__main__":
print(F"""{solution() = }""")
| 20 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("""transformers.models.encodec""")
MAPPING_QUANTIZER = {
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
MAPPING_ENCODER = {
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
MAPPING_ENCODER_48K = {
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
MAPPING_DECODER = {
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
MAPPING_DECODER_48K = {
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively( hf_pointer , key , value , full_name , weight_type ) -> None:
    """simple docstring"""
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            F' {value.shape} for {full_name}' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value
    logger.info(F'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' )
def should_ignore( name , ignore_keys ) -> bool:
    """simple docstring"""
    for key in ignore_keys:
        if key.endswith('.*' ):
            if name.startswith(key[:-1] ):
                return True
        elif ".*." in key:
            prefix , suffix = key.split('.*.' )
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
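# Pattern sketch: an ignore key "encoder.layers.*" matches any name that starts
# with "encoder.layers.", while a key of the form "a.*.b" matches any name
# containing both the "a" prefix and the "b" suffix.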
def recursively_load_weights( orig_dict , hf_model , model_name ) -> None:
    """simple docstring"""
    unused_weights = []
    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(F'Unsupported model: {model_name}' )
    for name, value in orig_dict.items():
        if should_ignore(name , IGNORE_KEYS ):
            logger.info(F'{name} was ignored' )
            continue
        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix , suffix = key.split('.*.' )
                if prefix in name and suffix in name:
                    key = suffix
            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith('embed' ) and name.endswith('embed_avg' ):
                    continue
                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key )[0].split('.' )[-2]
                    mapped_key = mapped_key.replace('*' , layer_index )
                if "weight_g" in name:
                    weight_type = 'weight_g'
                elif "weight_v" in name:
                    weight_type = 'weight_v'
                elif "weight_ih_l0" in name:
                    weight_type = 'weight_ih_l0'
                elif "weight_hh_l0" in name:
                    weight_type = 'weight_hh_l0'
                elif "bias_ih_l0" in name:
                    weight_type = 'bias_ih_l0'
                elif "bias_hh_l0" in name:
                    weight_type = 'bias_hh_l0'
                elif "weight_ih_l1" in name:
                    weight_type = 'weight_ih_l1'
                elif "weight_hh_l1" in name:
                    weight_type = 'weight_hh_l1'
                elif "bias_ih_l1" in name:
                    weight_type = 'bias_ih_l1'
                elif "bias_hh_l1" in name:
                    weight_type = 'bias_hh_l1'
                elif "bias" in name:
                    weight_type = 'bias'
                elif "weight" in name:
                    weight_type = 'weight'
                elif "running_mean" in name:
                    weight_type = 'running_mean'
                elif "running_var" in name:
                    weight_type = 'running_var'
                elif "num_batches_tracked" in name:
                    weight_type = 'num_batches_tracked'
                else:
                    weight_type = None
                set_recursively(hf_model , mapped_key , value , name , weight_type )
            continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F'Unused weights: {unused_weights}' )
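# Routing sketch: each original EnCodec parameter is matched against the MAPPING
# tables above; the suffix of its name picks the weight_type (weight_g/weight_v
# for weight-normalised convolutions, *_ih_l0 / *_hh_l1 and friends for the two
# LSTM layers), and set_recursively copies the tensor into the HF model.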
@torch.no_grad()
def convert_checkpoint( model_name , checkpoint_path , pytorch_dump_folder_path , config_path=None , repo_id=None , ) -> None:
    """simple docstring"""
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path )
    else:
        config = EncodecConfig()
    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 3_20_00
        config.codebook_size = 20_48
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 4_80_00
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = 'time_group_norm'
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(F'Unknown model name: {model_name}' )
    model = EncodecModel(config )
    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
    original_checkpoint = torch.load(checkpoint_path )
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint['best_state']
    recursively_load_weights(original_checkpoint , model , model_name )
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print('Pushing to the hub...' )
        feature_extractor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 20 |
"""simple docstring"""
def solution( limit = 50_00_00_00 ) -> int:
    """simple docstring"""
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2) )
    primes = set(range(3 , prime_square_limit + 1 , 2 ) )
    primes.add(2 )
    for p in range(3 , prime_square_limit + 1 , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , prime_square_limit + 1 , p ) ) )
    for prime1 in primes:
        square = prime1 * prime1
        for prime2 in primes:
            cube = prime2 * prime2 * prime2
            if square + cube >= limit - 16:
                break
            for prime3 in primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total )
    return len(ret )
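# Example (Project Euler 87): 28 = 2**2 + 2**3 + 2**4 is the smallest number
# expressible as a prime square plus a prime cube plus a prime fourth power, so
# solution(50) counts it among the qualifying totals below 50.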
if __name__ == "__main__":
print(F"""{solution() = }""")
| 20 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
A__ : Optional[int]= logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config( model_name ) -> ASTConfig:
    """simple docstring"""
    config = ASTConfig()
    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 1_28
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError('Model not supported' )
    repo_id = 'huggingface/label-files'
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = 'speech-commands-v2-id2label.json'
    else:
        config.num_labels = 5_27
        filename = 'audioset-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def rename_key( name ) -> str:
    """simple docstring"""
    if "module.v" in name:
        name = name.replace('module.v' , 'audio_spectrogram_transformer' )
    if "cls_token" in name:
        name = name.replace('cls_token' , 'embeddings.cls_token' )
    if "dist_token" in name:
        name = name.replace('dist_token' , 'embeddings.distillation_token' )
    if "pos_embed" in name:
        name = name.replace('pos_embed' , 'embeddings.position_embeddings' )
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
    # transformer blocks
    if "blocks" in name:
        name = name.replace('blocks' , 'encoder.layer' )
    if "attn.proj" in name:
        name = name.replace('attn.proj' , 'attention.output.dense' )
    if "attn" in name:
        name = name.replace('attn' , 'attention.self' )
    if "norm1" in name:
        name = name.replace('norm1' , 'layernorm_before' )
    if "norm2" in name:
        name = name.replace('norm2' , 'layernorm_after' )
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1' , 'intermediate.dense' )
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2' , 'output.dense' )
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace('audio_spectrogram_transformer.norm' , 'audio_spectrogram_transformer.layernorm' )
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace('module.mlp_head.0' , 'classifier.layernorm' )
    if "module.mlp_head.1" in name:
        name = name.replace('module.mlp_head.1' , 'classifier.dense' )
    return name
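# Example mapping (sketch): "module.v.blocks.0.attn.proj.weight" becomes
# "audio_spectrogram_transformer.encoder.layer.0.attention.output.dense.weight"
# after the chain of replacements above.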
def convert_state_dict( orig_state_dict , config ) -> dict:
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split('.' )
            layer_num = int(key_split[3] )
            dim = config.hidden_size
            if "weight" in key:
                orig_state_dict[F'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight'] = val[:dim, :]
                orig_state_dict[F'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight'] = val[dim : dim * 2, :]
                orig_state_dict[F'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight'] = val[-dim:, :]
            else:
                orig_state_dict[F'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias'] = val[:dim]
                orig_state_dict[F'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias'] = val[dim : dim * 2]
                orig_state_dict[F'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias'] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def remove_keys( state_dict ) -> None:
    """simple docstring"""
    ignore_keys = [
        'module.v.head.weight',
        'module.v.head.bias',
        'module.v.head_dist.weight',
        'module.v.head_dist.bias',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub=False ) -> None:
    """simple docstring"""
    config = get_audio_spectrogram_transformer_config(model_name )
    model_name_to_url = {
'ast-finetuned-audioset-10-10-0.4593': (
'https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.450': (
'https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448': (
'https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448-v2': (
'https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'
),
'ast-finetuned-audioset-12-12-0.447': (
'https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'
),
'ast-finetuned-audioset-14-14-0.443': (
'https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'
),
'ast-finetuned-audioset-16-16-0.442': (
'https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'
),
'ast-finetuned-speech-commands-v2': (
'https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'
),
}
    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='cpu' )
    # remove some keys
    remove_keys(state_dict )
    # rename some keys
    new_state_dict = convert_state_dict(state_dict , config )
    # load 🤗 model
    model = ASTForAudioClassification(config )
    model.eval()
    model.load_state_dict(new_state_dict )
    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if 'speech-commands' not in model_name else -6.845978
    std = 4.5689974 if 'speech-commands' not in model_name else 5.5654526
    max_length = 10_24 if 'speech-commands' not in model_name else 1_28
    feature_extractor = ASTFeatureExtractor(mean=mean , std=std , max_length=max_length )
    if "speech-commands" in model_name:
        dataset = load_dataset('speech_commands' , 'v0.02' , split='validation' )
        waveform = dataset[0]['audio']['array']
    else:
        filepath = hf_hub_download(
            repo_id='nielsr/audio-spectogram-transformer-checkpoint' , filename='sample_audio.flac' , repo_type='dataset' , )
        waveform , _ = torchaudio.load(filepath )
        waveform = waveform.squeeze().numpy()
    inputs = feature_extractor(waveform , sampling_rate=1_60_00 , return_tensors='pt' )
    # forward pass
    outputs = model(**inputs )
    logits = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
UpperCamelCase__ = torch.tensor([-0.8760, -7.0042, -8.6602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
UpperCamelCase__ = torch.tensor([-1.1986, -7.0903, -8.2718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
UpperCamelCase__ = torch.tensor([-2.6128, -8.0080, -9.4344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
UpperCamelCase__ = torch.tensor([-1.5080, -7.4534, -8.8917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
UpperCamelCase__ = torch.tensor([-0.5050, -6.5833, -8.0843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
UpperCamelCase__ = torch.tensor([-0.3826, -7.0336, -8.2413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
UpperCamelCase__ = torch.tensor([-1.2113, -6.9101, -8.3470] )
elif model_name == "ast-finetuned-speech-commands-v2":
UpperCamelCase__ = torch.tensor([6.1589, -8.0566, -8.7984] )
else:
raise ValueError('Unknown model name' )
if not torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ):
raise ValueError('Logits don\'t match' )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE )
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE )
print(F'Saving feature extractor to {pytorch_dump_folder_path}' )
feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE )
if push_to_hub:
print('Pushing model and feature extractor to the hub...' )
model.push_to_hub(F'MIT/{model_name}' )
feature_extractor.push_to_hub(F'MIT/{model_name}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""ast-finetuned-audioset-10-10-0.4593""",
type=str,
help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 20 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
A__ : List[Any]= ["""bert-base-uncased""", """bert-base-cased"""]
A__ : Optional[int]= """hf-internal-testing/tiny-bert-tf-only"""
if is_tf_available():
class __lowerCamelCase ( tf.keras.Model ):
        def __init__( self , tokenizer ) -> None:
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT )
            self.bert = TFAutoModel.from_config(config )
        def call( self , inputs ) -> int:
            tokenized = self.tokenizer(inputs )
            out = self.bert(**tokenized )
            return out["pooler_output"]
@require_tf
@require_tensorflow_text
class __lowerCamelCase ( unittest.TestCase ):
    def setUp( self ) -> None:
super().setUp()
        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint , use_fast_bert_tokenizer=False )
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
        self.test_sentences = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
        self.paired_sentences = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs , return_tensors='tf' , padding='longest' )
                tf_outputs = tf_tokenizer(test_inputs )
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences )
            separated_outputs = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer )
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs )
                compiled_outputs = compiled_tokenizer(test_inputs )
                eager_outputs = tf_tokenizer(test_inputs )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer )
            test_inputs = tf.convert_to_tensor(self.test_sentences )
            out = model(test_inputs )  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir ) / 'saved.model'
                model.save(save_path )
                loaded_model = tf.keras.models.load_model(save_path )
                loaded_output = loaded_model(test_inputs )
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1E-5 )
| 20 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar("""T""")
class DisjointSetTreeNode( Generic[T] ):
    def __init__( self , data ) -> None:
        self.data = data
        self.parent = self
        self.rank = 0
class DisjointSetTree( Generic[T] ):
    def __init__( self ) -> None:
        # map from node name to the node object
        self.map = {}
    def make_set( self , data ) -> None:
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data )
    def find_set( self , data ) -> DisjointSetTreeNode[T]:
        # find the set x belongs to (with path-compression: re-point every node
        # on the walk directly at the root so later lookups are amortised O(1))
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data )
        return elem_ref.parent
    def link( self , node1 , node2 ) -> None:
        # helper function for union operation (union by rank)
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1
    def union( self , data1 , data2 ) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(data1 ) , self.find_set(data2 ) )
class GraphUndirectedWeighted( Generic[T] ):
    def __init__( self ) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections = {}
    def add_node( self , node ) -> None:
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}
    def add_edge( self , node1 , node2 , weight ) -> None:
        # add an edge with the given weight
        self.add_node(node1 )
        self.add_node(node2 )
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
    def kruskal( self ) -> GraphUndirectedWeighted[T]:
        # collect each undirected edge once, then sort by weight
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start) )
                    edges.append((start, end, self.connections[start][end]) )
        edges.sort(key=lambda x : x[2] )
        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node )
        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections ) - 1:
            u , v , w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u )
            parent_v = disjoint_set.find_set(v )
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u , v , w )
                disjoint_set.union(u , v )
        return graph
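# Usage sketch (hypothetical weights; the kruskal method name is restored above
# from the upstream algorithm implementation):
# graph = GraphUndirectedWeighted[int]()
# graph.add_edge(1, 2, 1)
# graph.add_edge(2, 3, 2)
# graph.add_edge(1, 3, 3)
# mst = graph.kruskal()  # keeps edges (1, 2) and (2, 3): total weight 3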
| 20 |
"""simple docstring"""
def exchange_sort( numbers ) -> list[int]:
    """simple docstring"""
    numbers_length = len(numbers )
    for i in range(numbers_length ):
        for j in range(i + 1 , numbers_length ):
            if numbers[j] < numbers[i]:
                numbers[i] , numbers[j] = numbers[j], numbers[i]
    return numbers
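# Example: exchange_sort([5, 2, 4, 1]) returns [1, 2, 4, 5]; the list is sorted
# in place using O(n^2) pairwise comparisons and swaps.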
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
print(exchange_sort(unsorted))
| 20 | 1 |
"""simple docstring"""
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def lowerCAmelCase_( ) -> int:
"""simple docstring"""
UpperCamelCase__ = 10
UpperCamelCase__ = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
UpperCamelCase__ = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
'id': list(range(SCREAMING_SNAKE_CASE ) ),
} , features=SCREAMING_SNAKE_CASE , )
return dataset
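# Fixture sketch: the session-scoped fixtures below materialise the same small
# dataset in many on-disk formats (arrow, csv, json, jsonl, parquet, sqlite) and
# archive flavours (bz2, gzip, lz4, 7z, tar, xz, zip, zstd) so that loader tests
# can share one set of files.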
@pytest.fixture(scope='session' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=SCREAMING_SNAKE_CASE )
return filename
# FILE_CONTENT + files
A__ : Tuple= """\
Text data.
Second line of data."""
@pytest.fixture(scope='session' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ = tmp_path_factory.mktemp('data' ) / 'file.txt'
UpperCamelCase__ = FILE_CONTENT
with open(SCREAMING_SNAKE_CASE , 'w' ) as f:
f.write(SCREAMING_SNAKE_CASE )
return filename
@pytest.fixture(scope='session' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
import bza
UpperCamelCase__ = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
UpperCamelCase__ = bytes(SCREAMING_SNAKE_CASE , 'utf-8' )
with bza.open(SCREAMING_SNAKE_CASE , 'wb' ) as f:
f.write(SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
import gzip
UpperCamelCase__ = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
UpperCamelCase__ = bytes(SCREAMING_SNAKE_CASE , 'utf-8' )
with gzip.open(SCREAMING_SNAKE_CASE , 'wb' ) as f:
f.write(SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
if datasets.config.LZ4_AVAILABLE:
import lza.frame
UpperCamelCase__ = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
UpperCamelCase__ = bytes(SCREAMING_SNAKE_CASE , 'utf-8' )
with lza.frame.open(SCREAMING_SNAKE_CASE , 'wb' ) as f:
f.write(SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
UpperCamelCase__ = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
with pyazr.SevenZipFile(SCREAMING_SNAKE_CASE , 'w' ) as archive:
archive.write(SCREAMING_SNAKE_CASE , arcname=os.path.basename(SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
import tarfile
UpperCamelCase__ = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(SCREAMING_SNAKE_CASE , 'w' ) as f:
f.add(SCREAMING_SNAKE_CASE , arcname=os.path.basename(SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
import lzma
UpperCamelCase__ = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
UpperCamelCase__ = bytes(SCREAMING_SNAKE_CASE , 'utf-8' )
with lzma.open(SCREAMING_SNAKE_CASE , 'wb' ) as f:
f.write(SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
import zipfile
UpperCamelCase__ = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(SCREAMING_SNAKE_CASE , 'w' ) as f:
f.write(SCREAMING_SNAKE_CASE , arcname=os.path.basename(SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
UpperCamelCase__ = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
UpperCamelCase__ = bytes(SCREAMING_SNAKE_CASE , 'utf-8' )
with zstd.open(SCREAMING_SNAKE_CASE , 'wb' ) as f:
f.write(SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
UpperCamelCase__ = tmp_path_factory.mktemp('data' ) / 'file.xml'
UpperCamelCase__ = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(SCREAMING_SNAKE_CASE , 'w' ) as f:
f.write(SCREAMING_SNAKE_CASE )
return filename
DATA = [
{"""col_1""": """0""", """col_2""": 0, """col_3""": 0.0},
{"""col_1""": """1""", """col_2""": 1, """col_3""": 1.0},
{"""col_1""": """2""", """col_2""": 2, """col_3""": 2.0},
{"""col_1""": """3""", """col_2""": 3, """col_3""": 3.0},
]
DATA2 = [
{"""col_1""": """4""", """col_2""": 4, """col_3""": 4.0},
{"""col_1""": """5""", """col_2""": 5, """col_3""": 5.0},
]
DATA_DICT_OF_LISTS = {
"""col_1""": ["""0""", """1""", """2""", """3"""],
"""col_2""": [0, 1, 2, 3],
"""col_3""": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{"""col_3""": 0.0, """col_1""": """0""", """col_2""": 0},
{"""col_3""": 1.0, """col_1""": """1""", """col_2""": 1},
]
DATA_STR = [
{"""col_1""": """s0""", """col_2""": 0, """col_3""": 0.0},
{"""col_1""": """s1""", """col_2""": 1, """col_3""": 1.0},
{"""col_1""": """s2""", """col_2""": 2, """col_3""": 2.0},
{"""col_1""": """s3""", """col_2""": 3, """col_3""": 3.0},
]
@pytest.fixture(scope='session' )
def lowerCAmelCase_( ) -> int:
"""simple docstring"""
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ = datasets.Dataset.from_dict(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
with contextlib.closing(sqlitea.connect(SCREAMING_SNAKE_CASE ) ) as con:
UpperCamelCase__ = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
UpperCamelCase__ = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(SCREAMING_SNAKE_CASE , 'w' , newline='' ) as f:
UpperCamelCase__ = csv.DictWriter(SCREAMING_SNAKE_CASE , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
UpperCamelCase__ = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(SCREAMING_SNAKE_CASE , 'w' , newline='' ) as f:
UpperCamelCase__ = csv.DictWriter(SCREAMING_SNAKE_CASE , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
import bza
UpperCamelCase__ = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(SCREAMING_SNAKE_CASE , 'rb' ) as f:
UpperCamelCase__ = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(SCREAMING_SNAKE_CASE , 'wb' ) as f:
f.write(SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
UpperCamelCase__ = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(SCREAMING_SNAKE_CASE , 'w' ) as f:
f.write(SCREAMING_SNAKE_CASE , arcname=os.path.basename(SCREAMING_SNAKE_CASE ) )
f.write(SCREAMING_SNAKE_CASE , arcname=os.path.basename(SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
UpperCamelCase__ = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(SCREAMING_SNAKE_CASE , 'w' ) as f:
f.write(SCREAMING_SNAKE_CASE , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(SCREAMING_SNAKE_CASE , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(SCREAMING_SNAKE_CASE , 'w' ) as f:
f.write(SCREAMING_SNAKE_CASE , arcname=os.path.join('main_dir' , os.path.basename(SCREAMING_SNAKE_CASE ) ) )
f.write(SCREAMING_SNAKE_CASE , arcname=os.path.join('main_dir' , os.path.basename(SCREAMING_SNAKE_CASE ) ) )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
UpperCamelCase__ = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
UpperCamelCase__ = pa.schema(
{
'col_1': pa.string(),
'col_2': pa.intaa(),
'col_3': pa.floataa(),
} )
with open(SCREAMING_SNAKE_CASE , 'wb' ) as f:
UpperCamelCase__ = pq.ParquetWriter(SCREAMING_SNAKE_CASE , schema=SCREAMING_SNAKE_CASE )
UpperCamelCase__ = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(SCREAMING_SNAKE_CASE ) )] for k in DATA[0]} , schema=SCREAMING_SNAKE_CASE )
writer.write_table(SCREAMING_SNAKE_CASE )
writer.close()
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
UpperCamelCase__ = {'data': DATA}
with open(SCREAMING_SNAKE_CASE , 'w' ) as f:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
UpperCamelCase__ = {'data': DATA_DICT_OF_LISTS}
with open(SCREAMING_SNAKE_CASE , 'w' ) as f:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(SCREAMING_SNAKE_CASE , 'w' ) as f:
for item in DATA:
f.write(json.dumps(SCREAMING_SNAKE_CASE ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(SCREAMING_SNAKE_CASE , 'w' ) as f:
for item in DATA:
f.write(json.dumps(SCREAMING_SNAKE_CASE ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(SCREAMING_SNAKE_CASE , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(SCREAMING_SNAKE_CASE ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
UpperCamelCase__ = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(SCREAMING_SNAKE_CASE , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(SCREAMING_SNAKE_CASE ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
import gzip
UpperCamelCase__ = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(SCREAMING_SNAKE_CASE , 'rb' ) as orig_file:
with gzip.open(SCREAMING_SNAKE_CASE , 'wb' ) as zipped_file:
zipped_file.writelines(SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
import gzip
UpperCamelCase__ = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(SCREAMING_SNAKE_CASE , 'rb' ) as orig_file:
with gzip.open(SCREAMING_SNAKE_CASE , 'wb' ) as zipped_file:
zipped_file.writelines(SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(SCREAMING_SNAKE_CASE , 'w' ) as f:
f.write(SCREAMING_SNAKE_CASE , arcname=os.path.basename(SCREAMING_SNAKE_CASE ) )
f.write(SCREAMING_SNAKE_CASE , arcname=os.path.basename(SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(SCREAMING_SNAKE_CASE , 'w' ) as f:
f.write(SCREAMING_SNAKE_CASE , arcname=os.path.join('nested' , os.path.basename(SCREAMING_SNAKE_CASE ) ) )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(SCREAMING_SNAKE_CASE , 'w' ) as f:
f.write(SCREAMING_SNAKE_CASE , arcname=os.path.join('main_dir' , os.path.basename(SCREAMING_SNAKE_CASE ) ) )
f.write(SCREAMING_SNAKE_CASE , arcname=os.path.join('main_dir' , os.path.basename(SCREAMING_SNAKE_CASE ) ) )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(SCREAMING_SNAKE_CASE , 'w' ) as f:
f.add(SCREAMING_SNAKE_CASE , arcname=os.path.basename(SCREAMING_SNAKE_CASE ) )
f.add(SCREAMING_SNAKE_CASE , arcname=os.path.basename(SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(SCREAMING_SNAKE_CASE , 'w' ) as f:
f.add(SCREAMING_SNAKE_CASE , arcname=os.path.join('nested' , os.path.basename(SCREAMING_SNAKE_CASE ) ) )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
UpperCamelCase__ = ['0', '1', '2', '3']
UpperCamelCase__ = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(SCREAMING_SNAKE_CASE , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
UpperCamelCase__ = ['0', '1', '2', '3']
UpperCamelCase__ = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(SCREAMING_SNAKE_CASE , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ = ['0', '1', '2', '3']
UpperCamelCase__ = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(SCREAMING_SNAKE_CASE , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(SCREAMING_SNAKE_CASE , 'w' ) as f:
f.write(SCREAMING_SNAKE_CASE , arcname=os.path.basename(SCREAMING_SNAKE_CASE ) )
f.write(SCREAMING_SNAKE_CASE , arcname=os.path.basename(SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
UpperCamelCase__ = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(SCREAMING_SNAKE_CASE , 'w' ) as f:
f.write(SCREAMING_SNAKE_CASE , arcname=os.path.join('main_dir' , os.path.basename(SCREAMING_SNAKE_CASE ) ) )
f.write(SCREAMING_SNAKE_CASE , arcname=os.path.join('main_dir' , os.path.basename(SCREAMING_SNAKE_CASE ) ) )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(SCREAMING_SNAKE_CASE , 'w' ) as f:
f.write(SCREAMING_SNAKE_CASE , arcname=os.path.basename('unsupported.ext' ) )
f.write(SCREAMING_SNAKE_CASE , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
UpperCamelCase__ = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f:
f.write(SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase_( ) -> Tuple:
"""simple docstring"""
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def lowerCAmelCase_( ) -> Any:
"""simple docstring"""
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
UpperCamelCase__ = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(SCREAMING_SNAKE_CASE , 'w' ) as f:
f.write(SCREAMING_SNAKE_CASE , arcname=os.path.basename(SCREAMING_SNAKE_CASE ) )
f.write(SCREAMING_SNAKE_CASE , arcname=os.path.basename(SCREAMING_SNAKE_CASE ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
return data_dir
| 20 |
"""simple docstring"""
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
A__ : str= {
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
A__ : str= {
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
UpperCamelCase__ = (images / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase__ = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
UpperCamelCase__ = numpy_to_pil(SCREAMING_SNAKE_CASE )
return images
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
if images.ndim == 3:
UpperCamelCase__ = images[None, ...]
UpperCamelCase__ = (images * 2_55).round().astype('uint8' )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
UpperCamelCase__ = [Image.fromarray(image.squeeze() , mode='L' ) for image in images]
else:
UpperCamelCase__ = [Image.fromarray(SCREAMING_SNAKE_CASE ) for image in images]
return pil_images
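# --- Hypothetical usage sketch (assumes the two helpers above keep their
# original names, `pt_to_pil` and `numpy_to_pil`, which the obfuscated
# `lowerCAmelCase_` definitions hide; requires torch) ---
# import torch
# fake_batch = torch.randn(2, 3, 64, 64)   # fake [-1, 1] diffusion output, NCHW
# pil_images = pt_to_pil(fake_batch)       # rescale to [0, 1], NHWC, then PIL
# assert pil_images[0].size == (64, 64)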
| 20 | 1 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __lowerCamelCase ( _a ):
a : Tuple ="""ClapFeatureExtractor"""
a : Optional[Any] =("""RobertaTokenizer""", """RobertaTokenizerFast""")
def __init__( self , snake_case_ , snake_case_ ) -> Optional[Any]:
super().__init__(snake_case_ , snake_case_ )
def __call__( self , snake_case_=None , snake_case_=None , snake_case_=None , **snake_case_ ) -> Optional[Any]:
UpperCamelCase__ = kwargs.pop('sampling_rate' , snake_case_ )
if text is None and audios is None:
raise ValueError('You have to specify either text or audios. Both cannot be none.' )
if text is not None:
UpperCamelCase__ = self.tokenizer(snake_case_ , return_tensors=snake_case_ , **snake_case_ )
if audios is not None:
UpperCamelCase__ = self.feature_extractor(
snake_case_ , sampling_rate=snake_case_ , return_tensors=snake_case_ , **snake_case_ )
if text is not None and audios is not None:
UpperCamelCase__ = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**snake_case_ ) , tensor_type=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , *snake_case_ , **snake_case_ ) -> List[Any]:
return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , *snake_case_ , **snake_case_ ) -> int:
return self.tokenizer.decode(*snake_case_ , **snake_case_ )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = self.tokenizer.model_input_names
UpperCamelCase__ = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
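# --- Hypothetical usage sketch (the `laion/clap-htsat-unfused` checkpoint name
# and network access are assumptions, not part of this file) ---
# from transformers import ClapProcessor
# import numpy as np
# processor = ClapProcessor.from_pretrained('laion/clap-htsat-unfused')
# audio = np.random.randn(48_000).astype(np.float32)  # 1 s of fake 48 kHz audio
# inputs = processor(text=['a dog barking'], audios=audio,
#                    sampling_rate=48_000, return_tensors='pt')
# print(inputs.keys())  # input_ids, attention_mask, input_features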
| 20 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
A__ : Dict= logging.get_logger(__name__)
A__ : str= {"""vocab_file""": """spiece.model"""}
A__ : Union[str, Any]= {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
}
}
# TODO(PVP) - this should be removed in Transformers v5
A__ : Union[str, Any]= {
"""t5-small""": 5_12,
"""t5-base""": 5_12,
"""t5-large""": 5_12,
"""t5-3b""": 5_12,
"""t5-11b""": 5_12,
}
A__ : Optional[Any]= """▁"""
class __lowerCamelCase ( _a ):
a : Dict =VOCAB_FILES_NAMES
a : str =PRETRAINED_VOCAB_FILES_MAP
a : Union[str, Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : List[str] =["""input_ids""", """attention_mask"""]
def __init__( self , snake_case_ , snake_case_="</s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_=100 , snake_case_=None , snake_case_ = None , snake_case_=True , **snake_case_ , ) -> None:
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
UpperCamelCase__ = [F'<extra_id_{i}>' for i in range(snake_case_ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
UpperCamelCase__ = len(set(filter(lambda snake_case_ : bool('extra_id' in str(snake_case_ ) ) , snake_case_ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
' tokens' )
if legacy:
logger.warning_once(
F'You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'
' read the related pull request available at https://github.com/huggingface/transformers/pull/24565' )
UpperCamelCase__ = legacy
UpperCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , extra_ids=snake_case_ , additional_special_tokens=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , legacy=snake_case_ , **snake_case_ , )
UpperCamelCase__ = vocab_file
UpperCamelCase__ = extra_ids
UpperCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case_ )
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case_ , snake_case_ , snake_case_ ) -> Optional[Any]:
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
UpperCamelCase__ = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'This tokenizer was incorrectly instantiated with a model max length of'
F' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
F' {pretrained_model_name_or_path} automatically truncating your input to'
F' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
F' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
' instantiate this tokenizer with `model_max_length` set to your preferred value.' , snake_case_ , )
return max_model_length
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
return self.sp_model.get_piece_size() + self._extra_ids
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = {self.convert_ids_to_tokens(snake_case_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None , snake_case_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(snake_case_ )) + [1]
return ([0] * len(snake_case_ )) + [1] + ([0] * len(snake_case_ )) + [1]
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
return list(
set(filter(lambda snake_case_ : bool(re.search(r'<extra_id_\d+>' , snake_case_ ) ) is not None , self.additional_special_tokens ) ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
return [self._convert_token_to_id(snake_case_ ) for token in self.get_sentinel_tokens()]
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> List[int]:
if len(snake_case_ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
' eos tokens being added.' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None ) -> List[int]:
UpperCamelCase__ = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None ) -> List[int]:
UpperCamelCase__ = self._add_eos_if_not_present(snake_case_ )
if token_ids_a is None:
return token_ids_a
else:
UpperCamelCase__ = self._add_eos_if_not_present(snake_case_ )
return token_ids_a + token_ids_a
def __getstate__( self ) -> str:
UpperCamelCase__ = self.__dict__.copy()
UpperCamelCase__ = None
return state
def __setstate__( self , snake_case_ ) -> Any:
UpperCamelCase__ = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
UpperCamelCase__ = {}
UpperCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , **snake_case_ ) -> List[str]:
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
UpperCamelCase__ = SPIECE_UNDERLINE + text.replace(snake_case_ , ' ' )
return super().tokenize(snake_case_ , **snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , **snake_case_ ) -> List[Any]:
if not self.legacy:
UpperCamelCase__ = text.startswith(snake_case_ )
if is_first:
UpperCamelCase__ = text[1:]
UpperCamelCase__ = self.sp_model.encode(snake_case_ , out_type=snake_case_ )
if not self.legacy and not is_first and not text.startswith(' ' ) and tokens[0].startswith(snake_case_ ):
UpperCamelCase__ = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> List[str]:
if token.startswith('<extra_id_' ):
UpperCamelCase__ = re.match(r'<extra_id_(\d+)>' , snake_case_ )
UpperCamelCase__ = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Optional[int]:
if index < self.sp_model.get_piece_size():
UpperCamelCase__ = self.sp_model.IdToPiece(snake_case_ )
else:
UpperCamelCase__ = F'<extra_id_{self.vocab_size - 1 - index}>'
return token
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> List[str]:
UpperCamelCase__ = []
UpperCamelCase__ = ''
UpperCamelCase__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(snake_case_ ) + token
UpperCamelCase__ = True
UpperCamelCase__ = []
else:
current_sub_tokens.append(snake_case_ )
UpperCamelCase__ = False
out_string += self.sp_model.decode(snake_case_ )
return out_string.strip()
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]:
if not os.path.isdir(snake_case_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
UpperCamelCase__ = os.path.join(
snake_case_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case_ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case_ , 'wb' ) as fi:
UpperCamelCase__ = self.sp_model.serialized_model_proto()
fi.write(snake_case_ )
return (out_vocab_file,)
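# --- Hypothetical usage sketch (uses the public T5Tokenizer that this
# obfuscated class mirrors; downloading `t5-small` is assumed) ---
# from transformers import T5Tokenizer
# tok = T5Tokenizer.from_pretrained('t5-small')
# # sentinel tokens live at the top of the vocabulary: vocab_size - num - 1
# print(tok.convert_tokens_to_ids('<extra_id_0>'))  # 32099 for t5-small
# ids = tok('hello world').input_ids
# print(ids[-1] == tok.eos_token_id)  # True: EOS is appended if not present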
| 20 | 1 |
"""simple docstring"""
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
UpperCamelCase__ = flax_key_tuple[:-1] + ('weight',)
UpperCamelCase__ = torch.permute(SCREAMING_SNAKE_CASE , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(SCREAMING_SNAKE_CASE ):
# linear layer
UpperCamelCase__ = flax_key_tuple[:-1] + ('weight',)
UpperCamelCase__ = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
UpperCamelCase__ = flax_key_tuple[:-1] + ('weight',)
return flax_key_tuple, flax_tensor
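# --- Hypothetical illustration of the renamer above (fake key and tensor; the
# name `rename_base_flax_keys` is taken from its call site further below) ---
# import torch
# key = ('encoder', 'mlp', 'kernel')
# tensor = torch.zeros(8, 512, 2048)   # (num_experts, d_model, d_ff) expert kernel
# new_key, new_tensor = rename_base_flax_keys(key, tensor)
# print(new_key)           # ('encoder', 'mlp', 'weight')
# print(new_tensor.shape)  # torch.Size([8, 2048, 512]) after the (0, 2, 1) permute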
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
if "metadata" in layer:
UpperCamelCase__ = layer.split('metadata' )
UpperCamelCase__ = ''.join(split_layer[0] )[:-1]
UpperCamelCase__ = [tuple(('metadata' + split_layer[1]).split('/' ) )]
elif "kvstore" in layer:
UpperCamelCase__ = layer.split('kvstore' )
UpperCamelCase__ = ''.join(split_layer[0] )[:-1]
UpperCamelCase__ = [tuple(('kvstore' + split_layer[1]).split('/' ) )]
else:
UpperCamelCase__ = layer.split('/' )
UpperCamelCase__ = '/'.join(split_layer[:-1] )
UpperCamelCase__ = (split_layer[-1],)
if "kvstore/path" in layer:
UpperCamelCase__ = F'{switch_checkpoint_path}/{checkpoint_info[layer]}'
elif "kvstore/driver" in layer:
UpperCamelCase__ = 'file'
else:
UpperCamelCase__ = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ = rename_keys(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = {}
for k, v in current_block.items():
UpperCamelCase__ = v
UpperCamelCase__ = new_current_block
torch.save(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = WEIGHTS_NAME ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ = convert_file_size_to_int(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = []
UpperCamelCase__ = {}
UpperCamelCase__ = 0
UpperCamelCase__ = 0
os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
with gfile.GFile(switch_checkpoint_path + '/checkpoint' , 'rb' ) as fp:
UpperCamelCase__ = serialization.msgpack_restore(fp.read() )['optimizer']['target']
UpperCamelCase__ = flatten_dict(SCREAMING_SNAKE_CASE , sep='/' )
UpperCamelCase__ = {}
for layer in checkpoint_info.keys():
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = get_key_and_tensorstore_dict(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if curr_real_layer_name in all_layers:
UpperCamelCase__ = content
else:
UpperCamelCase__ = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
UpperCamelCase__ = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
UpperCamelCase__ = torch.tensor(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
UpperCamelCase__ , UpperCamelCase__ = rename_base_flax_keys(tuple(key.split('/' ) ) , SCREAMING_SNAKE_CASE )
UpperCamelCase__ = '/'.join(SCREAMING_SNAKE_CASE )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
UpperCamelCase__ = os.path.join(
SCREAMING_SNAKE_CASE , weights_name.replace('.bin' , F'-{len(SCREAMING_SNAKE_CASE )+1:05d}-of-???.bin' ) )
rename_and_save_block(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
sharded_state_dicts.append(current_block.keys() )
del current_block
UpperCamelCase__ = {}
UpperCamelCase__ = 0
UpperCamelCase__ = raw_weights.to(getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
UpperCamelCase__ = os.path.join(SCREAMING_SNAKE_CASE , weights_name.replace('.bin' , F'-{len(SCREAMING_SNAKE_CASE )+1:05d}-of-???.bin' ) )
rename_and_save_block(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(SCREAMING_SNAKE_CASE ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
UpperCamelCase__ = {}
UpperCamelCase__ = {}
for idx, shard in enumerate(SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = weights_name.replace(
'.bin' , F'-{idx+1:05d}-of-{len(SCREAMING_SNAKE_CASE ):05d}.bin' )
UpperCamelCase__ = os.path.join(SCREAMING_SNAKE_CASE , weights_name.replace('.bin' , F'-{idx+1:05d}-of-???.bin' ) )
os.rename(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
UpperCamelCase__ = shard
for key in shard:
UpperCamelCase__ = shard_file
# Add the metadata
UpperCamelCase__ = {'total_size': total_size}
UpperCamelCase__ = {'metadata': metadata, 'weight_map': weight_map}
with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , 'w' , encoding='utf-8' ) as f:
UpperCamelCase__ = json.dumps(SCREAMING_SNAKE_CASE , indent=2 , sort_keys=SCREAMING_SNAKE_CASE ) + '\n'
f.write(SCREAMING_SNAKE_CASE )
return metadata, index
if __name__ == "__main__":
A__ : Optional[int]= argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--max_shard_size""", default="""10GB""", required=False, help="""Max shard size""")
parser.add_argument("""--dtype""", default="""bfloat16""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
A__ : int= parser.parse_args()
shard_on_the_fly(
args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def lowerCAmelCase_( ) -> Union[str, Any]:
"""simple docstring"""
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
UpperCamelCase__ = SwitchTransformersConfig.from_pretrained('google/switch-base-8' )
config.save_pretrained('/home/arthur_huggingface_co/transformers/switch_converted' )
UpperCamelCase__ = SwitchTransformersForConditionalGeneration.from_pretrained(
'/home/arthur_huggingface_co/transformers/switch_converted' , device_map='auto' )
UpperCamelCase__ = TaTokenizer.from_pretrained('t5-small' )
UpperCamelCase__ = 'A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'
UpperCamelCase__ = tokenizer(SCREAMING_SNAKE_CASE , return_tensors='pt' ).input_ids
UpperCamelCase__ = model.generate(SCREAMING_SNAKE_CASE , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
| 20 |
"""simple docstring"""
from __future__ import annotations
from typing import Generic, TypeVar
A__ : Any= TypeVar("""T""")
class __lowerCamelCase ( Generic[T] ):
def __init__( self , snake_case_ ) -> None:
UpperCamelCase__ = data
UpperCamelCase__ = self
UpperCamelCase__ = 0
class __lowerCamelCase ( Generic[T] ):
def __init__( self ) -> None:
# map from node name to the node object
UpperCamelCase__ = {}
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> None:
# create a new set with x as its member
UpperCamelCase__ = DisjointSetTreeNode(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> DisjointSetTreeNode[T]:
# find the set x belongs to (with path-compression)
UpperCamelCase__ = self.map[data]
if elem_ref != elem_ref.parent:
UpperCamelCase__ = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> None:
# helper function for union operation
if nodea.rank > nodea.rank:
UpperCamelCase__ = nodea
else:
UpperCamelCase__ = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> None:
# merge 2 disjoint sets
self.link(self.find_set(snake_case_ ) , self.find_set(snake_case_ ) )
class __lowerCamelCase ( Generic[T] ):
def __init__( self ) -> None:
# connections: map from the node to the neighbouring nodes (with weights)
UpperCamelCase__ = {}
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> None:
# add a node ONLY if its not present in the graph
if node not in self.connections:
UpperCamelCase__ = {}
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> None:
# add an edge with the given weight
self.add_node(snake_case_ )
self.add_node(snake_case_ )
UpperCamelCase__ = weight
UpperCamelCase__ = weight
def SCREAMING_SNAKE_CASE__ ( self ) -> GraphUndirectedWeighted[T]:
UpperCamelCase__ = []
UpperCamelCase__ = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
edges.sort(key=lambda x : x[2] )
# creating the disjoint set
UpperCamelCase__ = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(snake_case_ )
# MST generation
UpperCamelCase__ = 0
UpperCamelCase__ = 0
UpperCamelCase__ = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = edges[index]
index += 1
UpperCamelCase__ = disjoint_set.find_set(snake_case_ )
UpperCamelCase__ = disjoint_set.find_set(snake_case_ )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(snake_case_ , snake_case_ , snake_case_ )
disjoint_set.union(snake_case_ , snake_case_ )
return graph
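# --- Hypothetical usage sketch (the MST method above is obfuscated as
# SCREAMING_SNAKE_CASE__; `kruskal` is its assumed original name, while
# `add_edge` is confirmed by the call sites above) ---
# g = GraphUndirectedWeighted[int]()
# g.add_edge(1, 2, 1)
# g.add_edge(2, 3, 2)
# g.add_edge(1, 3, 10)
# mst = g.kruskal()
# print(mst.connections)  # the weight-10 edge (1, 3) is dropped from the MST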
| 20 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ : Tuple= logging.get_logger(__name__)
A__ : Union[str, Any]= {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class __lowerCamelCase ( _a ):
a : Optional[int] ="""big_bird"""
def __init__( self , snake_case_=5_0358 , snake_case_=768 , snake_case_=12 , snake_case_=12 , snake_case_=3072 , snake_case_="gelu_new" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=4096 , snake_case_=2 , snake_case_=0.02 , snake_case_=1E-12 , snake_case_=True , snake_case_=0 , snake_case_=1 , snake_case_=2 , snake_case_=66 , snake_case_="block_sparse" , snake_case_=True , snake_case_=False , snake_case_=64 , snake_case_=3 , snake_case_=None , **snake_case_ , ) -> Dict:
super().__init__(
pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ , sep_token_id=snake_case_ , **snake_case_ , )
UpperCamelCase__ = vocab_size
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = initializer_range
UpperCamelCase__ = type_vocab_size
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = use_cache
UpperCamelCase__ = rescale_embeddings
UpperCamelCase__ = attention_type
UpperCamelCase__ = use_bias
UpperCamelCase__ = block_size
UpperCamelCase__ = num_random_blocks
UpperCamelCase__ = classifier_dropout
class __lowerCamelCase ( _a ):
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
UpperCamelCase__ = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
UpperCamelCase__ = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
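# --- Hypothetical usage sketch (`BigBirdConfig` is the assumed original name of
# the obfuscated config class above; values shown are its defaults) ---
# config = BigBirdConfig()
# print(config.attention_type)                         # 'block_sparse'
# print(config.block_size, config.num_random_blocks)   # 64 3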
| 20 |
"""simple docstring"""
A__ : Tuple= """Alexander Joslin"""
import operator as op
from .stack import Stack
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
UpperCamelCase__ = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
UpperCamelCase__ = Stack()
UpperCamelCase__ = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(SCREAMING_SNAKE_CASE ) )
elif i in operators:
# RULE 2
operator_stack.push(SCREAMING_SNAKE_CASE )
elif i == ")":
# RULE 4
UpperCamelCase__ = operator_stack.peek()
operator_stack.pop()
UpperCamelCase__ = operand_stack.peek()
operand_stack.pop()
UpperCamelCase__ = operand_stack.peek()
operand_stack.pop()
UpperCamelCase__ = operators[opr](SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
operand_stack.push(SCREAMING_SNAKE_CASE )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
A__ : int= """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 20 | 1 |
"""simple docstring"""
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' , [None, 4_00 * 2**20, 6_00 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 1_00 * 2**20, 9_00 * 2**20] )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , 'IN_MEMORY_MAX_SIZE' , SCREAMING_SNAKE_CASE )
UpperCamelCase__ = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
UpperCamelCase__ = dataset_size < in_memory_max_size
else:
UpperCamelCase__ = False
UpperCamelCase__ = is_small_dataset(SCREAMING_SNAKE_CASE )
assert result == expected
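# --- Hypothetical direct call, outside the parametrized test above ---
# import datasets.config
# from datasets.utils.info_utils import is_small_dataset
# datasets.config.IN_MEMORY_MAX_SIZE = 500 * 2**20   # 500 MiB cap
# print(is_small_dataset(400 * 2**20))               # True
# print(is_small_dataset(600 * 2**20))               # False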
| 20 |
"""simple docstring"""
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
A__ : Any= """src/diffusers"""
# Matches is_xxx_available()
A__ : Tuple= re.compile(r"""is\_([a-z_]*)_available\(\)""")
# Matches from xxx import bla
A__ : Any= re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
A__ : Optional[Any]= """
{0} = None
"""
A__ : List[Any]= """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
"""
A__ : Dict= """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ = _re_backend.findall(SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) == 0:
return None
return "_and_".join(SCREAMING_SNAKE_CASE )
def lowerCAmelCase_( ) -> str:
"""simple docstring"""
with open(os.path.join(SCREAMING_SNAKE_CASE , '__init__.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
UpperCamelCase__ = f.readlines()
# Get to the point we do the actual imports for type checking
UpperCamelCase__ = 0
UpperCamelCase__ = {}
# Go through the end of the file
while line_index < len(SCREAMING_SNAKE_CASE ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
UpperCamelCase__ = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith('else:' ):
line_index += 1
line_index += 1
UpperCamelCase__ = []
# Until we unindent, add backend objects to the list
while line_index < len(SCREAMING_SNAKE_CASE ) and len(lines[line_index] ) > 1:
UpperCamelCase__ = lines[line_index]
UpperCamelCase__ = _re_single_line_import.search(SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(SCREAMING_SNAKE_CASE ) > 0:
UpperCamelCase__ = objects
else:
line_index += 1
return backend_specific_objects
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
if name.isupper():
return DUMMY_CONSTANT.format(SCREAMING_SNAKE_CASE )
elif name.islower():
return DUMMY_FUNCTION.format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
return DUMMY_CLASS.format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE=None ) -> int:
"""simple docstring"""
if backend_specific_objects is None:
UpperCamelCase__ = read_init()
# For special correspondence backend to module name as used in the function requires_modulename
UpperCamelCase__ = {}
for backend, objects in backend_specific_objects.items():
UpperCamelCase__ = '[' + ', '.join(F'"{b}"' for b in backend.split('_and_' ) ) + ']'
UpperCamelCase__ = '# This file is autogenerated by the command `make fix-copies`, do not edit.\n'
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for o in objects] )
UpperCamelCase__ = dummy_file
return dummy_files
def lowerCAmelCase_( SCREAMING_SNAKE_CASE=False ) -> int:
"""simple docstring"""
UpperCamelCase__ = create_dummy_files()
# For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
UpperCamelCase__ = {'torch': 'pt'}
# Locate actual dummy modules and read their content.
UpperCamelCase__ = os.path.join(SCREAMING_SNAKE_CASE , 'utils' )
UpperCamelCase__ = {
backend: os.path.join(SCREAMING_SNAKE_CASE , F'dummy_{short_names.get(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}_objects.py' )
for backend in dummy_files.keys()
}
UpperCamelCase__ = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(SCREAMING_SNAKE_CASE ):
with open(SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' , newline='\n' ) as f:
UpperCamelCase__ = f.read()
else:
UpperCamelCase__ = ''
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F'Updating diffusers.utils.dummy_{short_names.get(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}_objects.py as the main '
'__init__ has new objects.' )
with open(dummy_file_paths[backend] , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
'The main __init__ has objects that are not present in '
F'diffusers.utils.dummy_{short_names.get(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}_objects.py. Run `make fix-copies` '
'to fix this.' )
if __name__ == "__main__":
A__ : Any= argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
A__ : Optional[int]= parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 20 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
A__ : Dict= logging.get_logger(__name__)
class __lowerCamelCase ( _a ):
a : List[str] =["""pixel_values"""]
def __init__( self , snake_case_ = True , snake_case_ = None , snake_case_ = PILImageResampling.BILINEAR , snake_case_ = True , snake_case_ = None , snake_case_ = True , snake_case_ = 1 / 255 , snake_case_ = True , snake_case_ = None , snake_case_ = None , **snake_case_ , ) -> None:
super().__init__(**snake_case_ )
UpperCamelCase__ = size if size is not None else {'shortest_edge': 256}
UpperCamelCase__ = get_size_dict(snake_case_ , default_to_square=snake_case_ )
UpperCamelCase__ = crop_size if crop_size is not None else {'height': 224, 'width': 224}
UpperCamelCase__ = get_size_dict(snake_case_ , param_name='crop_size' )
UpperCamelCase__ = do_resize
UpperCamelCase__ = size
UpperCamelCase__ = resample
UpperCamelCase__ = do_center_crop
UpperCamelCase__ = crop_size
UpperCamelCase__ = do_rescale
UpperCamelCase__ = rescale_factor
UpperCamelCase__ = do_normalize
UpperCamelCase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCamelCase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ = PILImageResampling.BICUBIC , snake_case_ = None , **snake_case_ , ) -> np.ndarray:
UpperCamelCase__ = get_size_dict(snake_case_ , default_to_square=snake_case_ )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
UpperCamelCase__ = get_resize_output_image_size(snake_case_ , size=size['shortest_edge'] , default_to_square=snake_case_ )
return resize(snake_case_ , size=snake_case_ , resample=snake_case_ , data_format=snake_case_ , **snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ = None , **snake_case_ , ) -> np.ndarray:
UpperCamelCase__ = get_size_dict(snake_case_ )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(snake_case_ , size=(size['height'], size['width']) , data_format=snake_case_ , **snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ = None , **snake_case_ ) -> np.ndarray:
return rescale(snake_case_ , scale=snake_case_ , data_format=snake_case_ , **snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ = None , **snake_case_ , ) -> np.ndarray:
return normalize(snake_case_ , mean=snake_case_ , std=snake_case_ , data_format=snake_case_ , **snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = ChannelDimension.FIRST , **snake_case_ , ) -> Tuple:
UpperCamelCase__ = do_resize if do_resize is not None else self.do_resize
UpperCamelCase__ = size if size is not None else self.size
UpperCamelCase__ = get_size_dict(snake_case_ , default_to_square=snake_case_ )
UpperCamelCase__ = resample if resample is not None else self.resample
UpperCamelCase__ = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCamelCase__ = crop_size if crop_size is not None else self.crop_size
UpperCamelCase__ = get_size_dict(snake_case_ , param_name='crop_size' )
UpperCamelCase__ = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase__ = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase__ = image_mean if image_mean is not None else self.image_mean
UpperCamelCase__ = image_std if image_std is not None else self.image_std
UpperCamelCase__ = make_list_of_images(snake_case_ )
if not valid_images(snake_case_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
UpperCamelCase__ = [to_numpy_array(snake_case_ ) for image in images]
if do_resize:
UpperCamelCase__ = [self.resize(image=snake_case_ , size=snake_case_ , resample=snake_case_ ) for image in images]
if do_center_crop:
UpperCamelCase__ = [self.center_crop(image=snake_case_ , size=snake_case_ ) for image in images]
if do_rescale:
UpperCamelCase__ = [self.rescale(image=snake_case_ , scale=snake_case_ ) for image in images]
if do_normalize:
UpperCamelCase__ = [self.normalize(image=snake_case_ , mean=snake_case_ , std=snake_case_ ) for image in images]
UpperCamelCase__ = [to_channel_dimension_format(snake_case_ , snake_case_ ) for image in images]
UpperCamelCase__ = {'pixel_values': images}
return BatchFeature(data=snake_case_ , tensor_type=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None ) -> Union[str, Any]:
UpperCamelCase__ = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(snake_case_ ) != len(snake_case_ ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(snake_case_ ):
UpperCamelCase__ = target_sizes.numpy()
UpperCamelCase__ = []
for idx in range(len(snake_case_ ) ):
UpperCamelCase__ = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=snake_case_ )
UpperCamelCase__ = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(snake_case_ )
else:
UpperCamelCase__ = logits.argmax(dim=1 )
UpperCamelCase__ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
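# --- Hypothetical usage sketch (fake image; the obfuscated class above mirrors a
# standard 256-resize / 224-center-crop image processor; requires torch) ---
# import numpy as np
# processor = __lowerCamelCase()   # obfuscated class name from this file
# image = (np.random.rand(300, 400, 3) * 255).astype(np.uint8)   # HWC uint8
# batch = processor(image, return_tensors='pt')
# print(batch['pixel_values'].shape)   # torch.Size([1, 3, 224, 224])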
| 20 |
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
A__ : Optional[Any]= """Run commands across TPU VMs for initial setup before running `accelerate launch`."""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE=None ) -> Dict:
"""simple docstring"""
if subparsers is not None:
UpperCamelCase__ = subparsers.add_parser('tpu-config' , description=_description )
else:
UpperCamelCase__ = argparse.ArgumentParser('Accelerate tpu-config command' , description=_description )
# Core arguments
UpperCamelCase__ = parser.add_argument_group(
'Config Arguments' , 'Arguments that can be configured through `accelerate config`.' )
config_args.add_argument(
'--config_file' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='Path to the config file to use for accelerate.' , )
config_args.add_argument(
'--tpu_name' , default=SCREAMING_SNAKE_CASE , help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.' , )
config_args.add_argument(
'--tpu_zone' , default=SCREAMING_SNAKE_CASE , help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.' , )
UpperCamelCase__ = parser.add_argument_group('TPU Arguments' , 'Arguments for options ran inside the TPU.' )
pod_args.add_argument(
'--use_alpha' , action='store_true' , help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.' , )
pod_args.add_argument(
'--command_file' , default=SCREAMING_SNAKE_CASE , help='The path to the file containing the commands to run on the pod on startup.' , )
pod_args.add_argument(
'--command' , action='append' , nargs='+' , help='A command to run on the pod. Can be passed multiple times.' , )
pod_args.add_argument(
'--install_accelerate' , action='store_true' , help='Whether to install accelerate on the pod. Defaults to False.' , )
pod_args.add_argument(
'--accelerate_version' , default='latest' , help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.' , )
pod_args.add_argument(
'--debug' , action='store_true' , help='If set, will print the command that would be run instead of running it.' )
if subparsers is not None:
parser.set_defaults(func=SCREAMING_SNAKE_CASE )
return parser
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
UpperCamelCase__ = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
UpperCamelCase__ = defaults.command_file
if not args.command and defaults.commands is not None:
UpperCamelCase__ = defaults.commands
if not args.tpu_name:
UpperCamelCase__ = defaults.tpu_name
if not args.tpu_zone:
UpperCamelCase__ = defaults.tpu_zone
if args.accelerate_version == "dev":
UpperCamelCase__ = 'git+https://github.com/huggingface/accelerate.git'
elif args.accelerate_version == "latest":
UpperCamelCase__ = 'accelerate -U'
elif isinstance(parse(args.accelerate_version ) , SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = F'accelerate=={args.accelerate_version}'
if not args.command_file and not args.command:
raise ValueError('You must specify either a command file or a command to run on the pod.' )
if args.command_file:
with open(args.command_file , 'r' ) as f:
UpperCamelCase__ = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
UpperCamelCase__ = ['cd /usr/share']
if args.install_accelerate:
new_cmd += [F'pip install {args.accelerate_version}']
new_cmd += args.command
UpperCamelCase__ = '; '.join(SCREAMING_SNAKE_CASE )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
UpperCamelCase__ = ['gcloud']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(F'Running {" ".join(SCREAMING_SNAKE_CASE )}' )
return
subprocess.run(SCREAMING_SNAKE_CASE )
print('Successfully setup pod.' )
def lowerCAmelCase_( ) -> int:
"""simple docstring"""
UpperCamelCase__ = tpu_command_parser()
UpperCamelCase__ = parser.parse_args()
tpu_command_launcher(SCREAMING_SNAKE_CASE )
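# --- Hypothetical example of what the launcher above assembles (all names and
# values invented) ---
# $ accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "python train.py" --install_accelerate
# roughly runs:
# $ gcloud compute tpus tpu-vm ssh my-tpu --zone us-central1-a \
#       --command "cd /usr/share; pip install accelerate -U; python train.py" \
#       --worker all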
| 20 | 1 |
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
A__ : Optional[Any]= """docs/source/en/_toctree.yml"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ = defaultdict(SCREAMING_SNAKE_CASE )
for doc in model_doc:
counts[doc["local"]] += 1
UpperCamelCase__ = [key for key, value in counts.items() if value > 1]
UpperCamelCase__ = []
for duplicate_key in duplicates:
UpperCamelCase__ = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key} )
if len(SCREAMING_SNAKE_CASE ) > 1:
raise ValueError(
F'{duplicate_key} is present several times in the documentation table of content at '
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1] )
# Sort
return sorted(SCREAMING_SNAKE_CASE , key=lambda s : s["title"].lower() )
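# --- Hypothetical illustration of the de-duplication above (fake toc entries;
# `clean_model_doc_toc` is the name used at the call site further below) ---
# clean_model_doc_toc([
#     {'local': 'model_doc/bert', 'title': 'BERT'},
#     {'local': 'model_doc/bert', 'title': 'BERT'},
#     {'local': 'model_doc/albert', 'title': 'ALBERT'},
# ])
# -> [{'local': 'model_doc/albert', 'title': 'ALBERT'},
#     {'local': 'model_doc/bert', 'title': 'BERT'}]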
def lowerCAmelCase_( SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]:
"""simple docstring"""
with open(SCREAMING_SNAKE_CASE , encoding='utf-8' ) as f:
UpperCamelCase__ = yaml.safe_load(f.read() )
# Get to the API doc
UpperCamelCase__ = 0
while content[api_idx]["title"] != "API":
api_idx += 1
UpperCamelCase__ = content[api_idx]['sections']
# Then to the model doc
UpperCamelCase__ = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
UpperCamelCase__ = api_doc[model_idx]['sections']
UpperCamelCase__ = [(idx, section) for idx, section in enumerate(SCREAMING_SNAKE_CASE ) if 'sections' in section]
UpperCamelCase__ = False
for idx, modality_doc in modalities_docs:
UpperCamelCase__ = modality_doc['sections']
UpperCamelCase__ = clean_model_doc_toc(SCREAMING_SNAKE_CASE )
if old_modality_doc != new_modality_doc:
UpperCamelCase__ = True
if overwrite:
UpperCamelCase__ = new_modality_doc
if diff:
if overwrite:
UpperCamelCase__ = model_doc
UpperCamelCase__ = api_doc
with open(SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(SCREAMING_SNAKE_CASE , allow_unicode=SCREAMING_SNAKE_CASE ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
A__ : Union[str, Any]= argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
A__ : Union[str, Any]= parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 20 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : List[str]= logging.get_logger(__name__)
class __lowerCamelCase ( _a ):
a : Optional[int] ="""timm_backbone"""
def __init__( self , snake_case_=None , snake_case_=3 , snake_case_=True , snake_case_=True , snake_case_=None , **snake_case_ , ) -> Dict:
super().__init__(**snake_case_ )
UpperCamelCase__ = backbone
UpperCamelCase__ = num_channels
UpperCamelCase__ = features_only
UpperCamelCase__ = use_pretrained_backbone
UpperCamelCase__ = True
UpperCamelCase__ = out_indices if out_indices is not None else (-1,)
| 20 | 1 |
"""simple docstring"""
import random
class __lowerCamelCase :
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> tuple[list[int], list[int]]:
UpperCamelCase__ = [ord(snake_case_ ) for i in text]
UpperCamelCase__ = []
UpperCamelCase__ = []
for i in plain:
UpperCamelCase__ = random.randint(1 , 300 )
UpperCamelCase__ = (i + k) * k
cipher.append(snake_case_ )
key.append(snake_case_ )
return cipher, key
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case_ , snake_case_ ) -> str:
UpperCamelCase__ = []
for i in range(len(snake_case_ ) ):
UpperCamelCase__ = int((cipher[i] - (key[i]) ** 2) / key[i] )
plain.append(chr(snake_case_ ) )
return "".join(snake_case_ )
if __name__ == "__main__":
A__, A__ : Dict= Onepad().encrypt("""Hello""")
print(c, k)
print(Onepad().decrypt(c, k))
| 20 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
A__ : Any= logging.get_logger(__name__)
A__ : str= {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class __lowerCamelCase ( _a ):
a : List[str] ="""layoutlmv3"""
def __init__( self , snake_case_=5_0265 , snake_case_=768 , snake_case_=12 , snake_case_=12 , snake_case_=3072 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=2 , snake_case_=0.02 , snake_case_=1E-5 , snake_case_=1 , snake_case_=0 , snake_case_=2 , snake_case_=1024 , snake_case_=128 , snake_case_=128 , snake_case_=True , snake_case_=32 , snake_case_=128 , snake_case_=64 , snake_case_=256 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=224 , snake_case_=3 , snake_case_=16 , snake_case_=None , **snake_case_ , ) -> Union[str, Any]:
super().__init__(
vocab_size=snake_case_ , hidden_size=snake_case_ , num_hidden_layers=snake_case_ , num_attention_heads=snake_case_ , intermediate_size=snake_case_ , hidden_act=snake_case_ , hidden_dropout_prob=snake_case_ , attention_probs_dropout_prob=snake_case_ , max_position_embeddings=snake_case_ , type_vocab_size=snake_case_ , initializer_range=snake_case_ , layer_norm_eps=snake_case_ , pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_ , )
UpperCamelCase__ = max_ad_position_embeddings
UpperCamelCase__ = coordinate_size
UpperCamelCase__ = shape_size
UpperCamelCase__ = has_relative_attention_bias
UpperCamelCase__ = rel_pos_bins
UpperCamelCase__ = max_rel_pos
UpperCamelCase__ = has_spatial_attention_bias
UpperCamelCase__ = rel_ad_pos_bins
UpperCamelCase__ = max_rel_ad_pos
UpperCamelCase__ = text_embed
UpperCamelCase__ = visual_embed
UpperCamelCase__ = input_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = patch_size
UpperCamelCase__ = classifier_dropout
class __lowerCamelCase ( _a ):
a : Tuple =version.parse("""1.12""" )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Mapping[str, Mapping[int, str]]:
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
else:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels'}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> float:
return 1E-5
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
return 12
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = -1 , snake_case_ = -1 , snake_case_ = False , snake_case_ = None , snake_case_ = 3 , snake_case_ = 40 , snake_case_ = 40 , ) -> Mapping[str, Any]:
setattr(processor.image_processor , 'apply_ocr' , snake_case_ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCamelCase__ = compute_effective_axis_dimension(
snake_case_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCamelCase__ = processor.tokenizer.num_special_tokens_to_add(snake_case_ )
UpperCamelCase__ = compute_effective_axis_dimension(
snake_case_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=snake_case_ )
# Generate dummy inputs according to compute batch and sequence
UpperCamelCase__ = [[' '.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
UpperCamelCase__ = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
UpperCamelCase__ = self._generate_dummy_images(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
UpperCamelCase__ = dict(
processor(
snake_case_ , text=snake_case_ , boxes=snake_case_ , return_tensors=snake_case_ , ) )
return inputs
| 20 | 1 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class __lowerCamelCase :
def __init__( self , snake_case_ , snake_case_=2 , snake_case_=True , snake_case_=False , snake_case_=10 , snake_case_=3 , snake_case_=32 * 8 , snake_case_=32 * 8 , snake_case_=4 , snake_case_=64 , ) -> Dict:
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = is_training
UpperCamelCase__ = use_auxiliary_loss
UpperCamelCase__ = num_queries
UpperCamelCase__ = num_channels
UpperCamelCase__ = min_size
UpperCamelCase__ = max_size
UpperCamelCase__ = num_labels
UpperCamelCase__ = hidden_dim
UpperCamelCase__ = hidden_dim
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
snake_case_ )
UpperCamelCase__ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=snake_case_ )
UpperCamelCase__ = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=snake_case_ ) > 0.5
).float()
UpperCamelCase__ = (torch.rand((self.batch_size, self.num_labels) , device=snake_case_ ) > 0.5).long()
UpperCamelCase__ = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
UpperCamelCase__ = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
UpperCamelCase__ = self.num_queries
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = [1, 1, 1, 1]
UpperCamelCase__ = self.num_channels
UpperCamelCase__ = 64
UpperCamelCase__ = 128
UpperCamelCase__ = self.hidden_dim
UpperCamelCase__ = self.hidden_dim
UpperCamelCase__ = self.hidden_dim
return config
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.prepare_config_and_inputs()
UpperCamelCase__ = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> Tuple:
UpperCamelCase__ = output.encoder_hidden_states
UpperCamelCase__ = output.pixel_decoder_hidden_states
UpperCamelCase__ = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(snake_case_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(snake_case_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(snake_case_ ) , config.decoder_layers )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=False ) -> List[str]:
with torch.no_grad():
UpperCamelCase__ = MaskaFormerModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(pixel_values=snake_case_ , pixel_mask=snake_case_ )
UpperCamelCase__ = model(snake_case_ , output_hidden_states=snake_case_ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(snake_case_ , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[Any]:
UpperCamelCase__ = MaskaFormerForUniversalSegmentation(config=snake_case_ )
model.to(snake_case_ )
model.eval()
def comm_check_on_output(snake_case_ ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
UpperCamelCase__ = model(pixel_values=snake_case_ , pixel_mask=snake_case_ )
UpperCamelCase__ = model(snake_case_ )
comm_check_on_output(snake_case_ )
UpperCamelCase__ = model(
pixel_values=snake_case_ , pixel_mask=snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ )
comm_check_on_output(snake_case_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __lowerCamelCase ( _a , _a , unittest.TestCase ):
a : Any =(MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
a : List[Any] ={"""feature-extraction""": MaskaFormerModel} if is_torch_available() else {}
a : List[Any] =False
a : Optional[int] =False
a : int =False
a : Union[str, Any] =False
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = MaskaFormerModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(snake_case_ , **snake_case_ , output_hidden_states=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*snake_case_ )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
pass
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ = [*signature.parameters.keys()]
UpperCamelCase__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
UpperCamelCase__ = MaskaFormerModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = (self.model_tester.min_size,) * 2
UpperCamelCase__ = {
'pixel_values': torch.randn((2, 3, *size) , device=snake_case_ ),
'mask_labels': torch.randn((2, 10, *size) , device=snake_case_ ),
'class_labels': torch.zeros(2 , 10 , device=snake_case_ ).long(),
}
UpperCamelCase__ = self.model_tester.get_config()
UpperCamelCase__ = MaskaFormerForUniversalSegmentation(snake_case_ ).to(snake_case_ )
UpperCamelCase__ = model(**snake_case_ )
self.assertTrue(outputs.loss is not None )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(snake_case_ , **snake_case_ , output_hidden_states=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(snake_case_ ).to(snake_case_ )
UpperCamelCase__ = model(**snake_case_ , output_attentions=snake_case_ )
self.assertTrue(outputs.attentions is not None )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
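        # only the universal segmentation head computes a loss, so restrict training to that class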
if not self.model_tester.is_training:
return
UpperCamelCase__ = self.all_model_classes[1]
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
UpperCamelCase__ = model_class(snake_case_ )
model.to(snake_case_ )
model.train()
UpperCamelCase__ = model(snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ ).loss
loss.backward()
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
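        # make sure gradients flow back to every intermediate output (hidden states and attentions)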
UpperCamelCase__ = self.all_model_classes[1]
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = model_class(snake_case_ ).to(snake_case_ )
model.train()
UpperCamelCase__ = model(snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ )
UpperCamelCase__ = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
UpperCamelCase__ = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
UpperCamelCase__ = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
UpperCamelCase__ = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=snake_case_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
A__ : List[str]= 1E-4
def lowerCAmelCase_( ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class __lowerCamelCase ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(snake_case_ )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(snake_case_ , return_tensors='pt' ).to(snake_case_ )
UpperCamelCase__ = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(snake_case_ , (1, 3, 384, 384) )
with torch.no_grad():
UpperCamelCase__ = model(**snake_case_ )
UpperCamelCase__ = torch.tensor(
[[-0.2_790, -1.0_717, -1.1_668], [-0.5_128, -0.3_128, -0.4_987], [-0.5_832, 0.1_971, -0.0_197]] ).to(snake_case_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )
UpperCamelCase__ = torch.tensor(
[[0.8_973, 1.1_847, 1.1_776], [1.1_934, 1.5_040, 1.5_128], [1.1_153, 1.4_486, 1.4_951]] ).to(snake_case_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )
UpperCamelCase__ = torch.tensor(
[[2.1_152, 1.7_000, -0.8_603], [1.5_808, 1.8_004, -0.9_353], [1.6_043, 1.7_495, -0.5_999]] ).to(snake_case_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
UpperCamelCase__ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(snake_case_ ).eval()
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(snake_case_ , return_tensors='pt' ).to(snake_case_ )
UpperCamelCase__ = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(snake_case_ , (1, 3, 384, 384) )
with torch.no_grad():
UpperCamelCase__ = model(**snake_case_ )
# masks_queries_logits
UpperCamelCase__ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
UpperCamelCase__ = [
[-8.7_839, -9.0_056, -8.8_121],
[-7.4_104, -7.0_313, -6.5_401],
[-6.6_105, -6.3_427, -6.4_675],
]
UpperCamelCase__ = torch.tensor(snake_case_ ).to(snake_case_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )
# class_queries_logits
UpperCamelCase__ = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
UpperCamelCase__ = torch.tensor(
[
[1.8_324, -8.0_835, -4.1_922],
[0.8_450, -9.0_050, -3.6_053],
[0.3_045, -7.7_293, -3.0_275],
] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(snake_case_ ).eval()
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='pt' , )
UpperCamelCase__ = inputs['pixel_values'].to(snake_case_ )
UpperCamelCase__ = [el.to(snake_case_ ) for el in inputs['mask_labels']]
UpperCamelCase__ = [el.to(snake_case_ ) for el in inputs['class_labels']]
with torch.no_grad():
UpperCamelCase__ = model(**snake_case_ )
self.assertTrue(outputs.loss is not None )
| 20 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class __lowerCamelCase :
def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=2 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=3 , snake_case_=4 , snake_case_=None , ) -> Tuple:
UpperCamelCase__ = parent
UpperCamelCase__ = 13
UpperCamelCase__ = 7
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = 99
UpperCamelCase__ = 384
UpperCamelCase__ = 2
UpperCamelCase__ = 4
UpperCamelCase__ = 37
UpperCamelCase__ = 'gelu'
UpperCamelCase__ = 0.1
UpperCamelCase__ = 0.1
UpperCamelCase__ = 512
UpperCamelCase__ = 16
UpperCamelCase__ = 2
UpperCamelCase__ = 0.02
UpperCamelCase__ = 3
UpperCamelCase__ = 4
UpperCamelCase__ = 128
UpperCamelCase__ = 2
UpperCamelCase__ = 9
UpperCamelCase__ = 1
UpperCamelCase__ = None
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
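        # sample random ids and masks; the label tensors are only built when use_labels is set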
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ = None
if self.use_input_mask:
UpperCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ = None
if self.use_token_type_ids:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase__ = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=snake_case_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int:
UpperCamelCase__ = TFConvBertModel(config=snake_case_ )
UpperCamelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCamelCase__ = [input_ids, input_mask]
UpperCamelCase__ = model(snake_case_ )
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int:
UpperCamelCase__ = TFConvBertForMaskedLM(config=snake_case_ )
UpperCamelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[str]:
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = TFConvBertForSequenceClassification(config=snake_case_ )
UpperCamelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Tuple:
UpperCamelCase__ = self.num_choices
UpperCamelCase__ = TFConvBertForMultipleChoice(config=snake_case_ )
UpperCamelCase__ = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[str]:
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = TFConvBertForTokenClassification(config=snake_case_ )
UpperCamelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[Any]:
UpperCamelCase__ = TFConvBertForQuestionAnswering(config=snake_case_ )
UpperCamelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
UpperCamelCase__ = self.prepare_config_and_inputs()
        UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = config_and_inputs
UpperCamelCase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class __lowerCamelCase ( _a , _a , unittest.TestCase ):
a : Any =(
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
a : str =(
{
"""feature-extraction""": TFConvBertModel,
"""fill-mask""": TFConvBertForMaskedLM,
"""question-answering""": TFConvBertForQuestionAnswering,
"""text-classification""": TFConvBertForSequenceClassification,
"""token-classification""": TFConvBertForTokenClassification,
"""zero-shot""": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
a : Any =False
a : Dict =False
a : str =False
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = TFConvBertModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=snake_case_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = True
UpperCamelCase__ = True
if hasattr(snake_case_ , 'use_cache' ):
UpperCamelCase__ = True
UpperCamelCase__ = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
UpperCamelCase__ = getattr(self.model_tester , 'key_length' , snake_case_ )
for model_class in self.all_model_classes:
UpperCamelCase__ = self._prepare_for_class(snake_case_ , snake_case_ )
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = len(model(snake_case_ ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case_ , saved_model=snake_case_ )
UpperCamelCase__ = os.path.join(snake_case_ , 'saved_model' , '1' )
UpperCamelCase__ = tf.keras.models.load_model(snake_case_ )
UpperCamelCase__ = model(snake_case_ )
if self.is_encoder_decoder:
UpperCamelCase__ = outputs['encoder_hidden_states']
UpperCamelCase__ = outputs['encoder_attentions']
else:
UpperCamelCase__ = outputs['hidden_states']
UpperCamelCase__ = outputs['attentions']
self.assertEqual(len(snake_case_ ) , snake_case_ )
UpperCamelCase__ = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(snake_case_ ) , snake_case_ )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
            list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads // 2, encoder_seq_length, encoder_key_length] , )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = True
UpperCamelCase__ = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
UpperCamelCase__ = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
UpperCamelCase__ = getattr(self.model_tester , 'key_length' , snake_case_ )
UpperCamelCase__ = getattr(self.model_tester , 'key_length' , snake_case_ )
def check_decoder_attentions_output(snake_case_ ):
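            # sanity-check the number of returned outputs before slicing out the decoder attentions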
UpperCamelCase__ = len(snake_case_ )
self.assertEqual(out_len % 2 , 0 )
UpperCamelCase__ = outputs.decoder_attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads // 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(snake_case_ ):
UpperCamelCase__ = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads // 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
UpperCamelCase__ = True
UpperCamelCase__ = False
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
UpperCamelCase__ = len(snake_case_ )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
if self.is_encoder_decoder:
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_decoder_attentions_output(snake_case_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
UpperCamelCase__ = True
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
# Check attention is always last and order is fine
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(snake_case_ ) )
self.assertEqual(model.config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
@require_tf
class __lowerCamelCase ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
UpperCamelCase__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCamelCase__ = model(snake_case_ )[0]
UpperCamelCase__ = [1, 6, 768]
self.assertEqual(output.shape , snake_case_ )
UpperCamelCase__ = tf.constant(
[
[
[-0.03_475_493, -0.4_686_034, -0.30_638_832],
[0.22_637_248, -0.26_988_646, -0.7_423_424],
[0.10_324_868, -0.45_013_508, -0.58_280_784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , snake_case_ , atol=1E-4 )
| 20 | 1 |
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
A__ : Dict= get_tests_dir("""fixtures/test_sentencepiece.model""")
A__ : Union[str, Any]= {"""target_lang""": """fi""", """source_lang""": """en"""}
A__ : int= """>>zh<<"""
A__ : List[str]= """Helsinki-NLP/"""
if is_torch_available():
A__ : Union[str, Any]= """pt"""
elif is_tf_available():
A__ : Dict= """tf"""
else:
A__ : Optional[Any]= """jax"""
@require_sentencepiece
class __lowerCamelCase ( _a , unittest.TestCase ):
a : Tuple =MarianTokenizer
a : List[str] =False
a : int =True
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
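        # write a tiny vocab plus the SentencePiece models into the temp dir used by the common tests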
super().setUp()
UpperCamelCase__ = ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>']
UpperCamelCase__ = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
UpperCamelCase__ = Path(self.tmpdirname )
save_json(snake_case_ , save_dir / VOCAB_FILES_NAMES['vocab'] )
save_json(snake_case_ , save_dir / VOCAB_FILES_NAMES['tokenizer_config_file'] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(snake_case_ , save_dir / VOCAB_FILES_NAMES['source_spm'] )
copyfile(snake_case_ , save_dir / VOCAB_FILES_NAMES['target_spm'] )
UpperCamelCase__ = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self , **snake_case_ ) -> MarianTokenizer:
return MarianTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Dict:
return (
"This is a test",
"This is a test",
)
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
UpperCamelCase__ = '</s>'
UpperCamelCase__ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
UpperCamelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '</s>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '<pad>' )
self.assertEqual(len(snake_case_ ) , 9 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
UpperCamelCase__ = MarianTokenizer.from_pretrained(F'{ORG_NAME}opus-mt-en-de' )
UpperCamelCase__ = en_de_tokenizer(['I am a small frog'] , return_tensors=snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
UpperCamelCase__ = [38, 121, 14, 697, 3_8848, 0]
self.assertListEqual(snake_case_ , batch.input_ids[0] )
UpperCamelCase__ = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(snake_case_ )
UpperCamelCase__ = [x.name for x in Path(snake_case_ ).glob('*' )]
self.assertIn('source.spm' , snake_case_ )
MarianTokenizer.from_pretrained(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
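        # inputs longer than the model max length must be truncated down to 512 tokens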
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = tok(
['I am a small frog' * 1000, 'I am a small frog'] , padding=snake_case_ , truncation=snake_case_ , return_tensors=snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertEqual(batch.input_ids.shape , (2, 512) )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = tok(['I am a tiny frog', 'I am a small frog'] , padding=snake_case_ , return_tensors=snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
# fmt: off
UpperCamelCase__ = {'input_ids': [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name='Helsinki-NLP/opus-mt-en-de' , revision='1a8c2263da11e68e50938f97e10cd57820bd504c' , decode_kwargs={'use_source_tokenizer': True} , )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
UpperCamelCase__ = MarianTokenizer.from_pretrained('hf-internal-testing/test-marian-two-vocabs' )
UpperCamelCase__ = 'Tämä on testi'
UpperCamelCase__ = 'This is a test'
UpperCamelCase__ = [76, 7, 2047, 2]
UpperCamelCase__ = [69, 12, 11, 940, 2]
UpperCamelCase__ = tokenizer(snake_case_ ).input_ids
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase__ = tokenizer(text_target=snake_case_ ).input_ids
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase__ = tokenizer.decode(snake_case_ , skip_special_tokens=snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
| 20 |
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def lowerCAmelCase_( SCREAMING_SNAKE_CASE = 1_00_00_00 , SCREAMING_SNAKE_CASE = 10 ) -> int:
"""simple docstring"""
UpperCamelCase__ = defaultdict(SCREAMING_SNAKE_CASE )
for outer_width in range(3 , (t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
UpperCamelCase__ = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
else:
UpperCamelCase__ = 1
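        # the hole width must have the same parity as the outer width so the border has uniform thickness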
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(SCREAMING_SNAKE_CASE , outer_width - 1 , 2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 20 | 1 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
A__ : Optional[Any]= logging.get_logger(__name__)
A__ : Dict= {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.linear_k""": """encoder.layers.*.self_attn.linear_k""",
"""self_attn.linear_v""": """encoder.layers.*.self_attn.linear_v""",
"""self_attn.linear_q""": """encoder.layers.*.self_attn.linear_q""",
"""self_attn.pos_bias_u""": """encoder.layers.*.self_attn.pos_bias_u""",
"""self_attn.pos_bias_v""": """encoder.layers.*.self_attn.pos_bias_v""",
"""self_attn.linear_out""": """encoder.layers.*.self_attn.linear_out""",
"""self_attn.linear_pos""": """encoder.layers.*.self_attn.linear_pos""",
"""self_attn.rotary_emb""": """encoder.embed_positions""",
"""self_attn_layer_norm""": """encoder.layers.*.self_attn_layer_norm""",
"""conv_module.pointwise_conv1""": """encoder.layers.*.conv_module.pointwise_conv1""",
"""conv_module.pointwise_conv2""": """encoder.layers.*.conv_module.pointwise_conv2""",
"""conv_module.depthwise_conv""": """encoder.layers.*.conv_module.depthwise_conv""",
"""conv_module.batch_norm""": """encoder.layers.*.conv_module.batch_norm""",
"""conv_module.layer_norm""": """encoder.layers.*.conv_module.layer_norm""",
"""ffn1.w_1""": """encoder.layers.*.ffn1.intermediate_dense""",
"""ffn1.w_2""": """encoder.layers.*.ffn1.output_dense""",
"""ffn1.layer_norm""": """encoder.layers.*.ffn1_layer_norm""",
"""ffn2.w_1""": """encoder.layers.*.ffn2.intermediate_dense""",
"""ffn2.w_2""": """encoder.layers.*.ffn2.output_dense""",
"""ffn2.layer_norm""": """encoder.layers.*.ffn2_layer_norm""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
A__ : str= [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
for attribute in key.split('.' ):
UpperCamelCase__ = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if weight_type is not None:
UpperCamelCase__ = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).shape
else:
UpperCamelCase__ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}' )
if weight_type == "weight":
UpperCamelCase__ = value
elif weight_type == "weight_g":
UpperCamelCase__ = value
elif weight_type == "weight_v":
UpperCamelCase__ = value
elif weight_type == "bias":
UpperCamelCase__ = value
elif weight_type == "running_mean":
UpperCamelCase__ = value
elif weight_type == "running_var":
UpperCamelCase__ = value
elif weight_type == "num_batches_tracked":
UpperCamelCase__ = value
elif weight_type == "inv_freq":
UpperCamelCase__ = value
else:
UpperCamelCase__ = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ = []
UpperCamelCase__ = fairseq_model.state_dict()
UpperCamelCase__ = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
UpperCamelCase__ = False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == 'group' , )
UpperCamelCase__ = True
else:
for key, mapped_key in MAPPING.items():
UpperCamelCase__ = 'wav2vec2_conformer.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
UpperCamelCase__ = True
if "*" in mapped_key:
UpperCamelCase__ = name.split(SCREAMING_SNAKE_CASE )[0].split('.' )[-2]
UpperCamelCase__ = mapped_key.replace('*' , SCREAMING_SNAKE_CASE )
if "pos_bias_u" in name:
UpperCamelCase__ = None
elif "pos_bias_v" in name:
UpperCamelCase__ = None
elif "weight_g" in name:
UpperCamelCase__ = 'weight_g'
elif "weight_v" in name:
UpperCamelCase__ = 'weight_v'
elif "bias" in name:
UpperCamelCase__ = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCamelCase__ = 'weight'
elif "running_mean" in name:
UpperCamelCase__ = 'running_mean'
elif "inv_freq" in name:
UpperCamelCase__ = 'inv_freq'
elif "running_var" in name:
UpperCamelCase__ = 'running_var'
elif "num_batches_tracked" in name:
UpperCamelCase__ = 'num_batches_tracked'
else:
UpperCamelCase__ = None
set_recursively(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE )
logger.warning(F'Unused weights: {unused_weights}' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ = full_name.split('conv_layers.' )[-1]
UpperCamelCase__ = name.split('.' )
UpperCamelCase__ = int(items[0] )
UpperCamelCase__ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
UpperCamelCase__ = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
UpperCamelCase__ = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
UpperCamelCase__ = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
UpperCamelCase__ = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(SCREAMING_SNAKE_CASE )
@torch.no_grad()
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True ) -> Any:
"""simple docstring"""
if config_path is not None:
UpperCamelCase__ = WavaVecaConformerConfig.from_pretrained(SCREAMING_SNAKE_CASE , hidden_act='swish' )
else:
UpperCamelCase__ = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
UpperCamelCase__ = 'rotary'
if is_finetuned:
if dict_path:
UpperCamelCase__ = Dictionary.load(SCREAMING_SNAKE_CASE )
            # important: change the bos & pad token ids, since the CTC symbol is <pad>
            # and not <s> as in fairseq
UpperCamelCase__ = target_dict.pad_index
UpperCamelCase__ = target_dict.bos_index
UpperCamelCase__ = target_dict.eos_index
UpperCamelCase__ = len(target_dict.symbols )
UpperCamelCase__ = os.path.join(SCREAMING_SNAKE_CASE , 'vocab.json' )
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(SCREAMING_SNAKE_CASE ) )
return
os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
UpperCamelCase__ = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCamelCase__ = 0
UpperCamelCase__ = 1
with open(SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase__ = WavaVecaCTCTokenizer(
SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=SCREAMING_SNAKE_CASE , )
UpperCamelCase__ = True if config.feat_extract_norm == 'layer' else False
UpperCamelCase__ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , )
UpperCamelCase__ = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE )
processor.save_pretrained(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = WavaVecaConformerForCTC(SCREAMING_SNAKE_CASE )
else:
UpperCamelCase__ = WavaVecaConformerForPreTraining(SCREAMING_SNAKE_CASE )
if is_finetuned:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
UpperCamelCase__ = argparse.Namespace(task='audio_pretraining' )
UpperCamelCase__ = fairseq.tasks.setup_task(SCREAMING_SNAKE_CASE )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=SCREAMING_SNAKE_CASE )
UpperCamelCase__ = model[0].eval()
recursively_load_weights(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , not is_finetuned )
hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
A__ : Dict= argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
A__ : Any= parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
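    # example invocation (the script and file names below are illustrative placeholders):
    #   python convert_wav2vec2_conformer_checkpoint.py --checkpoint_path ./conformer.pt \
    #       --pytorch_dump_folder_path ./hf_out --not_finetuned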
| 20 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class __lowerCamelCase ( unittest.TestCase ):
def __init__( self , snake_case_ , snake_case_=100 , snake_case_=13 , snake_case_=30 , snake_case_=2 , snake_case_=3 , snake_case_=True , snake_case_=True , snake_case_=32 , snake_case_=5 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=10 , snake_case_=0.02 , snake_case_=3 , ) -> Optional[int]:
UpperCamelCase__ = parent
UpperCamelCase__ = vocab_size
UpperCamelCase__ = batch_size
UpperCamelCase__ = image_size
UpperCamelCase__ = patch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = is_training
UpperCamelCase__ = use_labels
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = type_sequence_label_size
UpperCamelCase__ = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase__ = (image_size // patch_size) ** 2
UpperCamelCase__ = num_patches + 1
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ = BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case_ , initializer_range=self.initializer_range , )
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> List[Any]:
UpperCamelCase__ = FlaxBeitModel(config=snake_case_ )
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> List[Any]:
UpperCamelCase__ = FlaxBeitForMaskedImageModeling(config=snake_case_ )
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> List[str]:
UpperCamelCase__ = self.type_sequence_label_size
UpperCamelCase__ = FlaxBeitForImageClassification(config=snake_case_ )
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase__ = 1
UpperCamelCase__ = FlaxBeitForImageClassification(snake_case_ )
UpperCamelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase__ = model(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.prepare_config_and_inputs()
        UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = config_and_inputs
UpperCamelCase__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class __lowerCamelCase ( _a , unittest.TestCase ):
a : int =(
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
def SCREAMING_SNAKE_CASE__ ( self ) -> None:
UpperCamelCase__ = FlaxBeitModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ = [*signature.parameters.keys()]
UpperCamelCase__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
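        # the model outputs must have identical shapes with JIT enabled and disabled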
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCamelCase__ = self._prepare_for_class(snake_case_ , snake_case_ )
UpperCamelCase__ = model_class(snake_case_ )
@jax.jit
def model_jitted(snake_case_ , **snake_case_ ):
return model(pixel_values=snake_case_ , **snake_case_ )
with self.subTest('JIT Enabled' ):
UpperCamelCase__ = model_jitted(**snake_case_ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
UpperCamelCase__ = model_jitted(**snake_case_ ).to_tuple()
self.assertEqual(len(snake_case_ ) , len(snake_case_ ) )
for jitted_output, output in zip(snake_case_ , snake_case_ ):
self.assertEqual(jitted_output.shape , output.shape )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
for model_class_name in self.all_model_classes:
UpperCamelCase__ = model_class_name.from_pretrained('microsoft/beit-base-patch16-224' )
UpperCamelCase__ = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(snake_case_ )
def lowerCAmelCase_( ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = FlaxBeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=snake_case_ , return_tensors='np' ).pixel_values
# prepare bool_masked_pos
UpperCamelCase__ = np.ones((1, 196) , dtype=snake_case_ )
# forward pass
UpperCamelCase__ = model(pixel_values=snake_case_ , bool_masked_pos=snake_case_ )
UpperCamelCase__ = outputs.logits
# verify the logits
UpperCamelCase__ = (1, 196, 8192)
self.assertEqual(logits.shape , snake_case_ )
UpperCamelCase__ = np.array(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , snake_case_ , atol=1E-2 ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=snake_case_ , return_tensors='np' )
# forward pass
UpperCamelCase__ = model(**snake_case_ )
UpperCamelCase__ = outputs.logits
# verify the logits
UpperCamelCase__ = (1, 1000)
self.assertEqual(logits.shape , snake_case_ )
UpperCamelCase__ = np.array([-1.2_385, -1.0_987, -1.0_108] )
self.assertTrue(np.allclose(logits[0, :3] , snake_case_ , atol=1E-4 ) )
UpperCamelCase__ = 281
self.assertEqual(logits.argmax(-1 ).item() , snake_case_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=snake_case_ , return_tensors='np' )
# forward pass
UpperCamelCase__ = model(**snake_case_ )
UpperCamelCase__ = outputs.logits
# verify the logits
UpperCamelCase__ = (1, 2_1841)
self.assertEqual(logits.shape , snake_case_ )
UpperCamelCase__ = np.array([1.6_881, -0.2_787, 0.5_901] )
self.assertTrue(np.allclose(logits[0, :3] , snake_case_ , atol=1E-4 ) )
UpperCamelCase__ = 2396
self.assertEqual(logits.argmax(-1 ).item() , snake_case_ )
| 20 | 1 |
"""simple docstring"""
from math import factorial
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> float:
"""simple docstring"""
if successes > trials:
        raise ValueError('successes must be less than or equal to trials' )
if trials < 0 or successes < 0:
raise ValueError('the function is defined for non-negative integers' )
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
raise ValueError('the function is defined for non-negative integers' )
if not 0 < prob < 1:
raise ValueError('prob has to be in range of 1 - 0' )
UpperCamelCase__ = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
UpperCamelCase__ = float(factorial(SCREAMING_SNAKE_CASE ) )
coefficient /= factorial(SCREAMING_SNAKE_CASE ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print("""Probability of 2 successes out of 4 trails""")
print("""with probability of 0.75 is:""", end=""" """)
print(binomial_distribution(2, 4, 0.7_5))
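    # extra sanity check against the closed form: C(10, 5) * 0.5**10 == 252 / 1024 == 0.24609375
    print("""Probability of 5 successes out of 10 trials""")
    print("""with probability of 0.5 is:""", end=""" """)
    print(binomial_distribution(5, 10, 0.5))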
| 20 |
"""simple docstring"""
import sys
from collections import defaultdict
class __lowerCamelCase :
def __init__( self ) -> Tuple:
UpperCamelCase__ = []
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> List[str]:
return self.node_position[vertex]
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> Optional[int]:
UpperCamelCase__ = pos
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[Any]:
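        # sift the value at position start down until the min-heap property is restored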
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
UpperCamelCase__ = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
UpperCamelCase__ = 2 * start + 1
else:
UpperCamelCase__ = 2 * start + 2
if heap[smallest_child] < heap[start]:
UpperCamelCase__ , UpperCamelCase__ = heap[smallest_child], positions[smallest_child]
UpperCamelCase__ , UpperCamelCase__ = (
heap[start],
positions[start],
)
UpperCamelCase__ , UpperCamelCase__ = temp, tempa
UpperCamelCase__ = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , snake_case_ )
self.top_to_bottom(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Any:
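        # bubble the value at the given index up towards the root while it is smaller than its parent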
UpperCamelCase__ = position[index]
while index != 0:
UpperCamelCase__ = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
UpperCamelCase__ = heap[parent]
UpperCamelCase__ = position[parent]
self.set_position(position[parent] , snake_case_ )
else:
UpperCamelCase__ = val
UpperCamelCase__ = temp
self.set_position(snake_case_ , snake_case_ )
break
UpperCamelCase__ = parent
else:
UpperCamelCase__ = val
UpperCamelCase__ = temp
self.set_position(snake_case_ , 0 )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> Any:
UpperCamelCase__ = len(snake_case_ ) // 2 - 1
for i in range(snake_case_ , -1 , -1 ):
self.top_to_bottom(snake_case_ , snake_case_ , len(snake_case_ ) , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> List[Any]:
UpperCamelCase__ = positions[0]
UpperCamelCase__ = sys.maxsize
self.top_to_bottom(snake_case_ , 0 , len(snake_case_ ) , snake_case_ )
return temp
def prisms_algorithm( adjacency_list ) -> Tuple:
    """simple docstring"""
    heap = Heap()
    visited = [0] * len(adjacency_list )
    nbr_tv = [-1] * len(adjacency_list )  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list ) ):
        distance_tv.append(sys.maxsize )
        positions.append(vertex )
        heap.node_position.append(vertex )
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv , positions )
    for _ in range(1 , len(adjacency_list ) ):
        vertex = heap.delete_minimum(distance_tv , positions )
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex) )
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor )]
                ):
                    distance_tv[heap.get_position(neighbor )] = distance
                    heap.bottom_to_top(
                        distance , heap.get_position(neighbor ) , distance_tv , positions )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("""Enter number of edges: """).strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
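# A minimal offline sketch (added for illustration; not part of the original
# interactive driver above). It builds the same adjacency-list shape the input
# loop produces and prints the MST edges as (parent, child) pairs; the exact
# edge order may vary with tie-breaking inside the heap.
#
#   example = defaultdict(list)
#   for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 4), (2, 3, 3)]:
#       example[u].append([v, w])
#       example[v].append([u, w])
#   print(prisms_algorithm(example))  # e.g. [(0, 1), (1, 2), (2, 3)]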
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch
    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home
    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("""TORCH_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """torch"""))
    )
default_cache_path = os.path.join(torch_cache_home, """transformers""")
CLOUDFRONT_DISTRIB_PREFIX = """https://cdn.huggingface.co"""
S3_BUCKET_PREFIX = """https://s3.amazonaws.com/models.huggingface.co/bert"""
PATH = """/""".join(str(Path(__file__).resolve()).split("""/""")[:-1])
CONFIG = os.path.join(PATH, """config.yaml""")
ATTRIBUTES = os.path.join(PATH, """attributes.txt""")
OBJECTS = os.path.join(PATH, """objects.txt""")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("""PYTORCH_PRETRAINED_BERT_CACHE""", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("""PYTORCH_TRANSFORMERS_CACHE""", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("""TRANSFORMERS_CACHE""", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = """pytorch_model.bin"""
CONFIG_NAME = """config.yaml"""
def load_labels( objs=OBJECTS , attrs=ATTRIBUTES ) -> str:
    """simple docstring"""
    vg_classes = []
    with open(objs ) as f:
        for object in f.readlines():
            vg_classes.append(object.split(',' )[0].lower().strip() )
    vg_attrs = []
    with open(attrs ) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(',' )[0].lower().strip() )
    return vg_classes, vg_attrs
def load_checkpoint( ckp_fp ) -> Union[str, Any]:
    """simple docstring"""
    r = OrderedDict()
    with open(ckp_fp , 'rb' ) as f:
        ckp = pkl.load(f )['model']
    for k in copy.deepcopy(list(ckp.keys() ) ):
        v = ckp.pop(k )
        if isinstance(v , np.ndarray ):
            v = torch.tensor(v )
        else:
            assert isinstance(v , torch.Tensor ), type(v )
        r[k] = v
    return r
class __lowerCamelCase :
a : Optional[int] ={}
def __init__( self , snake_case_ , snake_case_ = "root" , snake_case_=0 ) -> List[str]:
UpperCamelCase__ = name
UpperCamelCase__ = level
UpperCamelCase__ = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
UpperCamelCase__ = copy.deepcopy(snake_case_ )
UpperCamelCase__ = copy.deepcopy(snake_case_ )
if isinstance(snake_case_ , snake_case_ ):
UpperCamelCase__ = Config(snake_case_ , name=snake_case_ , level=level + 1 )
UpperCamelCase__ = v
setattr(self , snake_case_ , snake_case_ )
UpperCamelCase__ = d
def __repr__( self ) -> Tuple:
return str(list((self._pointer.keys()) ) )
    def __setattr__( self , key , val ) -> List[Any]:
        self.__dict__[key] = val
        self.__dict__[key.split('.' )[-1]] = val
        levels = key.split('.' )
        last_level = len(levels ) - 1
        pointer = self._pointer
        if len(levels ) > 1:
            for i, l in enumerate(levels ):
                if hasattr(self , l ) and isinstance(getattr(self , l ) , Config ):
                    setattr(getattr(self , l ) , '.'.join(levels[i:] ) , val )
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
return self._pointer
    def SCREAMING_SNAKE_CASE__ ( self , data , file_name ) -> Optional[Any]:
        with open(F'{file_name}' , 'w' ) as stream:
            dump(data , stream )
    def SCREAMING_SNAKE_CASE__ ( self , data , file_name ) -> Optional[int]:
        with open(F'{file_name}' , 'w' ) as stream:
            json.dump(data , stream )
@staticmethod
    def load_yaml( config_file ) -> int:
        with open(config_file ) as stream:
            data = load(stream , Loader=Loader )
        return data
    def __str__( self ) -> str:
        t = ' '
        if self._name != "root":
            r = F'{t * (self._level-1)}{self._name}:\n'
        else:
            r = ''
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items() ):
            if isinstance(v , Config ):
                r += F'{t * (self._level)}{v}\n'
                self._level += 1
            else:
                r += F'{t * (self._level)}{k}: {v} ({type(v ).__name__})\n'
        self._level = level
        return r[:-1]
@classmethod
    def SCREAMING_SNAKE_CASE__ ( cls , pretrained_model_name_or_path , **kwargs ) -> List[Any]:
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        return cls(config_dict )
@classmethod
    def get_config_dict( cls , pretrained_model_name_or_path , **kwargs ) -> int:
        cache_dir = kwargs.pop('cache_dir' , None )
        force_download = kwargs.pop('force_download' , False )
        resume_download = kwargs.pop('resume_download' , False )
        proxies = kwargs.pop('proxies' , None )
        local_files_only = kwargs.pop('local_files_only' , False )
        if os.path.isdir(pretrained_model_name_or_path ):
            config_file = os.path.join(pretrained_model_name_or_path , CONFIG_NAME )
        elif os.path.isfile(pretrained_model_name_or_path ) or is_remote_url(pretrained_model_name_or_path ):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path , filename=CONFIG_NAME , use_cdn=False )
        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            config_file_content = Config.load_yaml(resolved_config_file )
        except EnvironmentError:
            msg = 'Can\'t load config for'
            raise EnvironmentError(msg )
        if resolved_config_file == config_file:
            print('loading configuration file from path' )
        else:
            print('loading configuration file cache' )
        return Config.load_yaml(resolved_config_file ), kwargs
def compare( in_tensor ) -> int:
    """simple docstring"""
    out_tensor = torch.load('dump.pt' , map_location=in_tensor.device )
    na = in_tensor.numpy()
    nb = out_tensor.numpy()[0]
    print(na.shape , na[0, 0, :5] )
    print(nb.shape , nb[0, 0, :5] )
    assert np.allclose(na , nb , rtol=0.01 , atol=0.1 ), (
        F'{sum([1 for x in np.isclose(na , nb , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*1_00:.4f} %'
        " element-wise mismatch"
    )
    raise Exception('tensors are all good' )  # quirk kept from the original: halt the debug run even on success
# Hugging face functions below
def is_remote_url( url_or_filename ) -> Optional[Any]:
    """simple docstring"""
    parsed = urlparse(url_or_filename )
    return parsed.scheme in ("http", "https")
def hf_bucket_url( model_id , filename , use_cdn=True ) -> str:
    """simple docstring"""
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = '/' not in model_id
    if legacy_format:
        return F'{endpoint}/{model_id}-{filename}'
    else:
        return F'{endpoint}/{model_id}/{filename}'
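# Usage sketch (added): flat model ids use the legacy '{model_id}-{filename}'
# layout, while namespaced ids use '{model_id}/{filename}'.
#   hf_bucket_url('bert-base-uncased', 'pytorch_model.bin')
#     -> 'https://cdn.huggingface.co/bert-base-uncased-pytorch_model.bin'
#   hf_bucket_url('facebook/bart-large', 'pytorch_model.bin', use_cdn=False)
#     -> 'https://s3.amazonaws.com/models.huggingface.co/bert/facebook/bart-large/pytorch_model.bin'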
def http_get( url , temp_file , proxies=None , resume_size=0 , user_agent=None , ) -> str:
    """simple docstring"""
    ua = 'python/{}'.format(sys.version.split()[0] )
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__ )
    if isinstance(user_agent , dict ):
        ua += "; " + "; ".join('{}/{}'.format(k , v ) for k, v in user_agent.items() )
    elif isinstance(user_agent , str ):
        ua += "; " + user_agent
    headers = {'user-agent': ua}
    if resume_size > 0:
        headers['Range'] = 'bytes=%d-' % (resume_size,)
    response = requests.get(url , stream=True , proxies=proxies , headers=headers )
    if response.status_code == 4_16:  # Range not satisfiable
        return
    content_length = response.headers.get('Content-Length' )
    total = resume_size + int(content_length ) if content_length is not None else None
    progress = tqdm(
        unit='B' , unit_scale=True , total=total , initial=resume_size , desc='Downloading' , )
    for chunk in response.iter_content(chunk_size=10_24 ):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk ) )
            temp_file.write(chunk )
    progress.close()
def get_from_cache( url , cache_dir=None , force_download=False , proxies=None , etag_timeout=10 , resume_download=False , user_agent=None , local_files_only=False , ) -> Union[str, Any]:
    """simple docstring"""
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir , Path ):
        cache_dir = str(cache_dir )
    os.makedirs(cache_dir , exist_ok=True )
    etag = None
    if not local_files_only:
        try:
            response = requests.head(url , allow_redirects=True , proxies=proxies , timeout=etag_timeout )
            if response.status_code == 2_00:
                etag = response.headers.get('ETag' )
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass
    filename = url_to_filename(url , etag )
    # get cache path to put the file
    cache_path = os.path.join(cache_dir , filename )
    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path ):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir ) , filename + '.*' )
                if not file.endswith('.json' ) and not file.endswith('.lock' )
            ]
            if len(matching_files ) > 0:
                return os.path.join(cache_dir , matching_files[-1] )
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        'Cannot find the requested files in the cached path and outgoing traffic has been'
                        ' disabled. To enable model look-ups and downloads online, set \'local_files_only\''
                        ' to False.' )
                return None
    # From now on, etag is not None.
    if os.path.exists(cache_path ) and not force_download:
        return cache_path
    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + '.lock'
    with FileLock(lock_path ):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path ) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path
        if resume_download:
            incomplete_path = cache_path + '.incomplete'
            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path , 'a+b' ) as f:
                    yield f
            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path ):
                resume_size = os.stat(incomplete_path ).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile , dir=cache_dir , delete=False )
            resume_size = 0
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                '%s not found in cache or force_download set to True, downloading to %s' % (url, temp_file.name) )
            http_get(
                url , temp_file , proxies=proxies , resume_size=resume_size , user_agent=user_agent , )
        os.replace(temp_file.name , cache_path )
        meta = {'url': url, 'etag': etag}
        meta_path = cache_path + '.json'
        with open(meta_path , 'w' ) as meta_file:
            json.dump(meta , meta_file )
    return cache_path
def url_to_filename( url , etag=None ) -> List[Any]:
    """simple docstring"""
    url_bytes = url.encode('utf-8' )
    url_hash = sha256(url_bytes )
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode('utf-8' )
        etag_hash = sha256(etag_bytes )
        filename += "." + etag_hash.hexdigest()
    if url.endswith('.h5' ):
        filename += ".h5"
    return filename
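# Sketch (added): the cache filename is sha256(url), extended with sha256(etag)
# when the server reports one, so a changed remote ETag produces a new cache
# entry instead of silently reusing a stale file:
#   url_to_filename('https://example.com/m.bin')             -> '<hex(url)>'
#   url_to_filename('https://example.com/m.bin', etag='xyz') -> '<hex(url)>.<hex(etag)>'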
def cached_path( url_or_filename , cache_dir=None , force_download=False , proxies=None , resume_download=False , user_agent=None , extract_compressed_file=False , force_extract=False , local_files_only=False , ) -> Optional[Any]:
    """simple docstring"""
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename , Path ):
        url_or_filename = str(url_or_filename )
    if isinstance(cache_dir , Path ):
        cache_dir = str(cache_dir )
    if is_remote_url(url_or_filename ):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , user_agent=user_agent , local_files_only=local_files_only , )
    elif os.path.exists(url_or_filename ):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename ).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError('file {} not found'.format(url_or_filename ) )
    else:
        # Something unknown
        raise ValueError('unable to parse {} as a URL or as a local path'.format(url_or_filename ) )
    if extract_compressed_file:
        if not is_zipfile(output_path ) and not tarfile.is_tarfile(output_path ):
            return output_path
        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir , output_file = os.path.split(output_path )
        output_extract_dir_name = output_file.replace('.' , '-' ) + '-extracted'
        output_path_extracted = os.path.join(output_dir , output_extract_dir_name )
        if os.path.isdir(output_path_extracted ) and os.listdir(output_path_extracted ) and not force_extract:
            return output_path_extracted
        # Prevent parallel extractions
        lock_path = output_path + '.lock'
        with FileLock(lock_path ):
            shutil.rmtree(output_path_extracted , ignore_errors=True )
            os.makedirs(output_path_extracted )
            if is_zipfile(output_path ):
                with ZipFile(output_path , 'r' ) as zip_file:
                    zip_file.extractall(output_path_extracted )
                    zip_file.close()
            elif tarfile.is_tarfile(output_path ):
                tar_file = tarfile.open(output_path )
                tar_file.extractall(output_path_extracted )
                tar_file.close()
            else:
                raise EnvironmentError('Archive format of {} could not be identified'.format(output_path ) )
        return output_path_extracted
    return output_path
def get_data( query , delim="," ) -> Optional[int]:
    """simple docstring"""
    assert isinstance(query , str )
    if os.path.isfile(query ):
        with open(query ) as f:
            # NOTE: eval of file/remote content is unsafe; kept from the original for parity
            data = eval(f.read() )
    else:
        req = requests.get(query )
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
        assert data is not None, "could not connect"
        try:
            data = eval(data )
        except Exception:
            data = data.split('\n' )
        req.close()
    return data
def get_image_from_url( url ) -> Optional[int]:
    """simple docstring"""
    response = requests.get(url )
    img = np.array(Image.open(BytesIO(response.content ) ) )
    return img
def load_frcnn_pkl_from_url( url ) -> Optional[int]:
    """simple docstring"""
    fn = url.split('/' )[-1]
    if fn not in os.listdir(os.getcwd() ):
        wget.download(url )
    with open(fn , 'rb' ) as stream:
        weights = pkl.load(stream )
    model = weights.pop('model' )
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v )
        if "running_var" in k:
            zero = torch.tensor([0] )
            newk = k.replace('running_var' , 'num_batches_tracked' )
            new[newk] = zero
    return new
def get_demo_path() -> Dict:
    """simple docstring"""
    print(F'{os.path.abspath(os.path.join(PATH , os.pardir ) )}/demo.ipynb' )
def img_tensorize( im , input_format="RGB" ) -> Dict:
    """simple docstring"""
    assert isinstance(im , str )
    if os.path.isfile(im ):
        img = cv2.imread(im )
    else:
        img = get_image_from_url(im )
        assert img is not None, F'could not connect to: {im}'
    img = cv2.cvtColor(img , cv2.COLOR_BGR2RGB )
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img
def chunk( images , batch=1 ) -> Optional[int]:
    """simple docstring"""
    return (images[i : i + batch] for i in range(0 , len(images ) , batch ))
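# Usage sketch (added; the name `chunk` is assumed from the upstream file):
# batching three image paths two at a time yields ['a.jpg', 'b.jpg'] and then
# ['c.jpg'].
#   for image_batch in chunk(['a.jpg', 'b.jpg', 'c.jpg'], batch=2):
#       ...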
"""simple docstring"""
from copy import deepcopy
class __lowerCamelCase :
    def __init__( self , arr = None , size = None ) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr )
        else:
            raise ValueError('Either arr or size must be specified' )
    def init( self , arr ) -> None:
        self.size = len(arr )
        self.tree = deepcopy(arr )
        for i in range(1 , self.size ):
            j = self.next_(i )
            if j < self.size:
                self.tree[j] += self.tree[i]
    def SCREAMING_SNAKE_CASE__ ( self ) -> list[int]:
        arr = self.tree[:]
        for i in range(self.size - 1 , 0 , -1 ):
            j = self.next_(i )
            if j < self.size:
                arr[j] -= arr[i]
        return arr
    @staticmethod
    def next_( index ) -> int:
        return index + (index & (-index))
    @staticmethod
    def prev( index ) -> int:
        return index - (index & (-index))
    def add( self , index , value ) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index )
    def SCREAMING_SNAKE_CASE__ ( self , index , value ) -> None:
        self.add(index , value - self.get(index ) )
    def prefix( self , right ) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right )
        return result
    def query( self , left , right ) -> int:
        return self.prefix(right ) - self.prefix(left )
    def get( self , index ) -> int:
        return self.query(index , index + 1 )
    def SCREAMING_SNAKE_CASE__ ( self , value ) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
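# Usage sketch (added; doctest-style, using this file's placeholder class name):
#   >>> bit = __lowerCamelCase(arr=[1, 2, 0, 3, 0, 5])
#   >>> bit.prefix(4)   # 1 + 2 + 0 + 3
#   6
#   >>> bit.add(2, 4)   # arr[2] += 4
#   >>> bit.query(2, 5) # 4 + 3 + 0
#   7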
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[10, 20, 30, 40] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ) -> Tuple:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths )
    def prepare_config_and_inputs( self ) -> Optional[int]:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        config = self.get_config()
        return config, pixel_values
    def get_config( self ) -> List[str]:
        return RegNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
    def create_and_check_model( self , config , pixel_values ) -> Any:
        model = FlaxRegNetModel(config=config )
        result = model(pixel_values )
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification( self , config , pixel_values ) -> Union[str, Any]:
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ) -> int:
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_flax
class __lowerCamelCase ( _a , unittest.TestCase ):
a : List[str] =(FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
a : List[Any] =False
a : Optional[Any] =False
a : int =False
    def SCREAMING_SNAKE_CASE__ ( self ) -> None:
        self.model_tester = FlaxRegNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RegNetConfig , has_text_modality=False )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
return
    def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
pass
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
pass
    def SCREAMING_SNAKE_CASE__ ( self ) -> str:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )
                @jax.jit
                def model_jitted(pixel_values , **kwargs ):
                    return model(pixel_values=pixel_values , **kwargs )
                with self.subTest('JIT Enabled' ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(outputs ) , len(jitted_outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
def prepare_img() -> List[Any]:
    """simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
return AutoImageProcessor.from_pretrained('facebook/regnet-y-040' ) if is_vision_available() else None
@slow
    def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
        model = FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040' )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='np' )
        outputs = model(**inputs )
        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = jnp.array([-0.4_180, -1.5_051, -3.4_836] )
        self.assertTrue(jnp.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
A__ : Union[str, Any]= logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""" )
@require_torch
@require_tf
@slow
class __lowerCamelCase ( unittest.TestCase ):
    def analyze_directory( self , directory , identifier = None , ignore_files = None , n_identifier = None , only_modules = True , ) -> Tuple:
        files = [file for file in os.listdir(directory ) if os.path.isfile(os.path.join(directory , file ) )]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier , list ):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append('__init__.py' )
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print('Testing' , file )
            if only_modules:
                module_identifier = file.split('.' )[0]
                try:
                    module_identifier = getattr(transformers , module_identifier )
                    suite = doctest.DocTestSuite(module_identifier )
                    result = unittest.TextTestRunner().run(suite )
                    self.assertIs(len(result.failures ) , 0 )
                except AttributeError:
                    logger.info(F'{module_identifier} is not a module.' )
            else:
                result = doctest.testfile(str('..' / directory / file ) , optionflags=doctest.ELLIPSIS )
                self.assertIs(result.failed , 0 )
    def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
        directory = Path('src/transformers' )
        identifier = 'modeling'
        ignore_files = [
            'modeling_ctrl.py',
            'modeling_tf_ctrl.py',
        ]
        self.analyze_directory(directory , identifier=identifier , ignore_files=ignore_files )
    def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
        directory = Path('src/transformers' )
        identifier = 'tokenization'
        self.analyze_directory(directory , identifier=identifier )
    def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
        directory = Path('src/transformers' )
        identifier = 'configuration'
        self.analyze_directory(directory , identifier=identifier )
    def SCREAMING_SNAKE_CASE__ ( self ) -> str:
        directory = Path('src/transformers' )
        n_identifiers = ['configuration', 'modeling', 'tokenization']
        self.analyze_directory(directory , n_identifier=n_identifiers )
    def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
        directory = Path('docs/source' )
        ignore_files = ['favicon.ico']
        self.analyze_directory(directory , ignore_files=ignore_files , only_modules=False )
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class __lowerCamelCase ( _a ):
    def __init__( self , dataset , process , params ) -> Union[str, Any]:
        self.dataset = dataset
        self.process = process
        self.params = params
    def __len__( self ) -> Any:
        return len(self.dataset )
    def __getitem__( self , i ) -> List[Any]:
        item = self.dataset[i]
        processed = self.process(item , **self.params )
        return processed
class __lowerCamelCase ( _a ):
    def __init__( self , loader , infer , params , loader_batch_size=None ) -> Dict:
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size
        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None
def __len__( self ) -> Tuple:
return len(self.loader )
    def __iter__( self ) -> Optional[Any]:
        self.iterator = iter(self.loader )
        return self
    def loader_batch_item( self ) -> Optional[int]:
        if isinstance(self._loader_batch_data , torch.Tensor ):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element , ModelOutput ):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0] , torch.Tensor ):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
                    elif isinstance(element[0] , np.ndarray ):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element , tuple ):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0] , torch.Tensor ):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
                    elif isinstance(element[0] , np.ndarray ):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0 )
                elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index] , 0 )
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched )
        self._loader_batch_index += 1
        return result
    def __next__( self ) -> List[Any]:
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()
        # We're out of items within a batch
        item = next(self.iterator )
        processed = self.infer(item , **self.params )
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed , torch.Tensor ):
                first_tensor = processed
            else:
                key = list(processed.keys() )[0]
                first_tensor = processed[key]
            if isinstance(first_tensor , list ):
                observed_batch_size = len(first_tensor )
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class __lowerCamelCase ( _a ):
    def __init__( self , loader , infer , params , loader_batch_size=None ) -> int:
        super().__init__(loader , infer , params )
    def __iter__( self ) -> Union[str, Any]:
        self.iterator = iter(self.loader )
        self.subiterator = None
        return self
    def __next__( self ) -> Any:
        if self.subiterator is None:
            self.subiterator = self.infer(next(self.iterator ) , **self.params )
        try:
            # Try to return next item
            processed = next(self.subiterator )
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item.
            # ChunkIterator will keep feeding until ALL elements of the iterator
            # have created their subiterator and have been iterated against.
            #
            # Another way to look at it is that we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator ) , **self.params )
            processed = next(self.subiterator )
        return processed
class __lowerCamelCase ( _a ):
    def __iter__( self ) -> str:
        self.iterator = iter(self.loader )
        return self
    def __next__( self ) -> Tuple:
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # hits an `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop('is_last' )
                accumulator.append(item )
                if is_last:
                    return accumulator
        while not is_last:
            processed = self.infer(next(self.iterator ) , **self.params )
            if self.loader_batch_size is not None:
                if isinstance(processed , torch.Tensor ):
                    first_tensor = processed
                else:
                    key = list(processed.keys() )[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor , list ):
                    observed_batch_size = len(first_tensor )
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop('is_last' )
                    accumulator.append(item )
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop('is_last' )
                accumulator.append(item )
        return accumulator
class __lowerCamelCase ( _a ):
    def __init__( self , dataset , key ) -> Any:
        self.dataset = dataset
        self.key = key
    def __len__( self ) -> int:
        return len(self.dataset )
    def __getitem__( self , i ) -> Dict:
        return self.dataset[i][self.key]
class __lowerCamelCase ( _a ):
    def __init__( self , dataset , keya , keyb ) -> Optional[int]:
        self.dataset = dataset
        self.keya = keya
        self.keyb = keyb
    def __len__( self ) -> List[str]:
        return len(self.dataset )
    def __getitem__( self , i ) -> Any:
        return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keyb]}
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ : str= logging.get_logger(__name__)
A__ : List[Any]= {
"""nvidia/segformer-b0-finetuned-ade-512-512""": (
"""https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"""
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class __lowerCamelCase ( _a ):
a : Any ="""segformer"""
    def __init__( self , num_channels=3 , num_encoder_blocks=4 , depths=[2, 2, 2, 2] , sr_ratios=[8, 4, 2, 1] , hidden_sizes=[32, 64, 160, 256] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , num_attention_heads=[1, 2, 5, 8] , mlp_ratios=[4, 4, 4, 4] , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , classifier_dropout_prob=0.1 , initializer_range=0.02 , drop_path_rate=0.1 , layer_norm_eps=1E-6 , decoder_hidden_size=256 , semantic_loss_ignore_index=255 , **kwargs , ) -> Tuple:
        super().__init__(**kwargs )
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
                ' removed, as the behaviour will default to that of reshape_last_stage = True.' , FutureWarning , )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get('reshape_last_stage' , True )
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
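# Usage sketch (added; the concrete class name SegformerConfig is assumed from
# the `segformer` model_type above): the defaults reproduce the MiT-b0 encoder
# (4 stages, hidden sizes 32/64/160/256, depths 2/2/2/2), and semantic
# segmentation fine-tuning typically only overrides the label count, e.g.
# `SegformerConfig(num_labels=150)` for the 150 ADE20k classes.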
class __lowerCamelCase ( _a ):
a : Any =version.parse("""1.11""" )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> float:
return 1E-4
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
return 12
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__( self , parent , out_indices=None , out_features=None , stage_names=None , backbone="resnet50" , batch_size=3 , image_size=32 , num_channels=3 , use_pretrained_backbone=True , is_training=True , ) -> Any:
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training
    def prepare_config_and_inputs( self ) -> Tuple:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        config = self.get_config()
        return config, pixel_values
    def get_config( self ) -> Dict:
        return TimmBackboneConfig(
            image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
    def create_and_check_model( self , config , pixel_values ) -> Optional[Any]:
        model = TimmBackbone(config=config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            result = model(pixel_values )
        self.parent.assertEqual(
            result.feature_maps[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
    def prepare_config_and_inputs_for_common( self ) -> Any:
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class __lowerCamelCase ( _a , _a , _a , unittest.TestCase ):
a : Optional[int] =(TimmBackbone,) if is_torch_available() else ()
a : Any ={"""feature-extraction""": TimmBackbone} if is_torch_available() else {}
a : Optional[Any] =False
a : Any =False
a : Any =False
a : str =False
    def SCREAMING_SNAKE_CASE__ ( self ) -> int:
        self.model_tester = TimmBackboneModelTester(self )
        self.config_tester = ConfigTester(self , config_class=TimmBackboneConfig , has_text_modality=False )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
        timm_checkpoint = 'resnet18'
        transformers_checkpoint = 'microsoft/resnet-18'
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint , use_timm_backbone=True )
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint )
        self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
        self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
        self.assertEqual(timm_model.channels , transformers_model.channels )
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices , (-1,) )
        self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint , use_timm_backbone=True , out_indices=[1, 2, 3] )
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint , out_indices=[1, 2, 3] )
        self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
        self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
        self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
pass
@unittest.skip('Safetensors is not supported by timm.' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
pass
    def SCREAMING_SNAKE_CASE__ ( self ) -> str:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions
        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config )
        model.to(torch_device )
        inputs = self._prepare_for_class(inputs_dict , model_class )
        outputs = model(**inputs )
        output = outputs[0][-1]
        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()
        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=True )
        self.assertIsNotNone(hidden_states.grad )
        if self.has_attentions:
            self.assertIsNotNone(attentions.grad )
    def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            result = model(**inputs_dict )
            self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
            self.assertEqual(len(model.channels ) , len(config.out_indices ) )
            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config )
            modified_config.out_indices = None
            model = model_class(modified_config )
            model.to(torch_device )
            model.eval()
            result = model(**inputs_dict )
            self.assertEqual(len(result.feature_maps ) , 1 )
            self.assertEqual(len(model.channels ) , 1 )
            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config )
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config )
            model.to(torch_device )
            model.eval()
            result = model(**inputs_dict )
"""simple docstring"""
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory( args ) -> Dict:
    """simple docstring"""
    return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class DownloadCommand( BaseTransformersCLICommand ):
@staticmethod
    def register_subcommand( parser ) -> Union[str, Any]:
        download_parser = parser.add_parser('download' )
        download_parser.add_argument(
            '--cache-dir' , type=str , default=None , help='Path to location to store the models' )
        download_parser.add_argument(
            '--force' , action='store_true' , help='Force the model to be downloaded even if already in cache-dir' )
        download_parser.add_argument(
            '--trust-remote-code' , action='store_true' , help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' , )
        download_parser.add_argument('model' , type=str , help='Name of the model to download' )
        download_parser.set_defaults(func=download_command_factory )
    def __init__( self , model , cache , force , trust_remote_code ) -> str:
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code
    def run( self ) -> List[Any]:
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
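# CLI sketch (added): once registered on the `transformers-cli` argument
# parser, the command is invoked as, e.g.:
#   transformers-cli download --cache-dir /tmp/models bert-base-uncased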
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
A__ : Union[str, Any]= None
A__ : Any= logging.get_logger(__name__)
A__ : Dict= {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
A__ : Optional[Any]= {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/tokenizer.json""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/tokenizer.json""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/tokenizer.json""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/tokenizer.json""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/tokenizer.json""",
},
}
# TODO(PVP) - this should be removed in Transformers v5
A__ : List[Any]= {
"""t5-small""": 5_12,
"""t5-base""": 5_12,
"""t5-large""": 5_12,
"""t5-3b""": 5_12,
"""t5-11b""": 5_12,
}
class __lowerCamelCase ( _a ):
a : Union[str, Any] =VOCAB_FILES_NAMES
a : Union[str, Any] =PRETRAINED_VOCAB_FILES_MAP
a : List[str] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : Tuple =["""input_ids""", """attention_mask"""]
a : List[str] =TaTokenizer
a : List[int] =[]
    def __init__( self , vocab_file=None , tokenizer_file=None , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , extra_ids=100 , additional_special_tokens=None , **kwargs , ) -> Dict:
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [F'<extra_id_{i}>' for i in range(extra_ids )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool('extra_id_' in str(x ) ) , additional_special_tokens ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    F'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
                    ' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
                    ' tokens' )
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , extra_ids=extra_ids , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case_ , snake_case_ , snake_case_ ) -> int:
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
UpperCamelCase__ = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'This tokenizer was incorrectly instantiated with a model max length of'
F' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
F' {pretrained_model_name_or_path} automatically truncating your input to'
F' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
F' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
' instantiate this tokenizer with `model_max_length` set to your preferred value.' , snake_case_ , )
return max_model_length
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(snake_case_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
UpperCamelCase__ = os.path.join(
snake_case_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ):
copyfile(self.vocab_file , snake_case_ )
logger.info(F'Copy vocab file to {out_vocab_file}' )
return (out_vocab_file,)
    def SCREAMING_SNAKE_CASE__ ( self , token_ids_a , token_ids_b = None ) -> List[int]:
        UpperCamelCase__ = token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a
        UpperCamelCase__ = token_ids_b + [self.eos_token_id]
        return self.prefix_tokens + token_ids_a + token_ids_b
    def SCREAMING_SNAKE_CASE__ ( self , token_ids_a , token_ids_b = None ) -> List[int]:
        UpperCamelCase__ = [self.eos_token_id]
        if token_ids_b is None:
            return len(token_ids_a + eos ) * [0]
        return len(token_ids_a + eos + token_ids_b + eos ) * [0]
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
return list(
set(filter(lambda snake_case_ : bool(re.search(r'<extra_id_\d+>' , snake_case_ ) ) is not None , self.additional_special_tokens ) ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
return [self.convert_tokens_to_ids(snake_case_ ) for token in self.get_sentinel_tokens()]
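# Minimal usage sketch for the tokenizer class above (the checkpoint name is an
# assumption; the class is exposed as `T5TokenizerFast` in transformers):
#
#   from transformers import T5TokenizerFast
#   tok = T5TokenizerFast.from_pretrained("t5-small")
#   ids = tok("Hello world").input_ids    # the special-token logic above appends </s>
#   sentinels = tok.get_sentinel_tokens() # ["<extra_id_0>", ..., "<extra_id_99>"]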
| 20 |
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCamelCase ( _a ):
def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=5 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=False , snake_case_=True , snake_case_="None" , snake_case_=3 , snake_case_=4 , snake_case_=None , ) -> str:
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = seq_length
UpperCamelCase__ = is_training
UpperCamelCase__ = use_input_mask
UpperCamelCase__ = use_token_type_ids
UpperCamelCase__ = use_labels
UpperCamelCase__ = vocab_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = type_vocab_size
UpperCamelCase__ = type_sequence_label_size
UpperCamelCase__ = initializer_range
UpperCamelCase__ = num_labels
UpperCamelCase__ = num_choices
UpperCamelCase__ = relative_attention
UpperCamelCase__ = position_biased_input
UpperCamelCase__ = pos_att_type
UpperCamelCase__ = scope
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ = None
if self.use_input_mask:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
UpperCamelCase__ = None
if self.use_token_type_ids:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Any:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> str:
UpperCamelCase__ = DebertaVaModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ )[0]
UpperCamelCase__ = model(snake_case_ , token_type_ids=snake_case_ )[0]
UpperCamelCase__ = model(snake_case_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Optional[Any]:
UpperCamelCase__ = DebertaVaForMaskedLM(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Dict:
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = DebertaVaForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Optional[int]:
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = DebertaVaForTokenClassification(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Tuple:
UpperCamelCase__ = DebertaVaForQuestionAnswering(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int:
UpperCamelCase__ = DebertaVaForMultipleChoice(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = self.prepare_config_and_inputs()
        UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = config_and_inputs
UpperCamelCase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( _a , _a , unittest.TestCase ):
a : Any =(
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
a : Dict =(
{
"""feature-extraction""": DebertaVaModel,
"""fill-mask""": DebertaVaForMaskedLM,
"""question-answering""": DebertaVaForQuestionAnswering,
"""text-classification""": DebertaVaForSequenceClassification,
"""token-classification""": DebertaVaForTokenClassification,
"""zero-shot""": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
a : Tuple =True
a : Union[str, Any] =False
a : Tuple =False
a : Union[str, Any] =False
a : Dict =False
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
UpperCamelCase__ = DebertaVaModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=snake_case_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*snake_case_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ = DebertaVaModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( unittest.TestCase ):
@unittest.skip(reason='Model not available yet' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
pass
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
UpperCamelCase__ = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge' )
UpperCamelCase__ = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
UpperCamelCase__ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ )[0]
# compare the actual values for a slice.
UpperCamelCase__ = torch.tensor(
[[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , snake_case_ , atol=1E-4 ) , F'{output[:, 1:4, 1:4]}' )
| 20 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __lowerCamelCase ( metaclass=_a ):
a : int =["""keras_nlp"""]
def __init__( self , *snake_case_ , **snake_case_ ) -> Tuple:
requires_backends(self , ['keras_nlp'] )
| 20 |
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ = SwinConfig()
UpperCamelCase__ = swin_name.split('_' )
UpperCamelCase__ = name_split[1]
UpperCamelCase__ = int(name_split[4] )
UpperCamelCase__ = int(name_split[3][-1] )
if model_size == "tiny":
UpperCamelCase__ = 96
UpperCamelCase__ = (2, 2, 6, 2)
UpperCamelCase__ = (3, 6, 12, 24)
elif model_size == "small":
UpperCamelCase__ = 96
UpperCamelCase__ = (2, 2, 18, 2)
UpperCamelCase__ = (3, 6, 12, 24)
elif model_size == "base":
UpperCamelCase__ = 1_28
UpperCamelCase__ = (2, 2, 18, 2)
UpperCamelCase__ = (4, 8, 16, 32)
else:
UpperCamelCase__ = 1_92
UpperCamelCase__ = (2, 2, 18, 2)
UpperCamelCase__ = (6, 12, 24, 48)
if "in22k" in swin_name:
UpperCamelCase__ = 2_18_41
else:
UpperCamelCase__ = 10_00
UpperCamelCase__ = 'huggingface/label-files'
UpperCamelCase__ = 'imagenet-1k-id2label.json'
UpperCamelCase__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
UpperCamelCase__ = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
UpperCamelCase__ = idalabel
UpperCamelCase__ = {v: k for k, v in idalabel.items()}
UpperCamelCase__ = img_size
UpperCamelCase__ = num_classes
UpperCamelCase__ = embed_dim
UpperCamelCase__ = depths
UpperCamelCase__ = num_heads
UpperCamelCase__ = window_size
return config
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
if "patch_embed.proj" in name:
UpperCamelCase__ = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
UpperCamelCase__ = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
UpperCamelCase__ = 'encoder.' + name
if "attn.proj" in name:
UpperCamelCase__ = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
UpperCamelCase__ = name.replace('attn' , 'attention.self' )
if "norm1" in name:
UpperCamelCase__ = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
UpperCamelCase__ = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
UpperCamelCase__ = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
UpperCamelCase__ = name.replace('mlp.fc2' , 'output.dense' )
if name == "norm.weight":
UpperCamelCase__ = 'layernorm.weight'
if name == "norm.bias":
UpperCamelCase__ = 'layernorm.bias'
if "head" in name:
UpperCamelCase__ = name.replace('head' , 'classifier' )
else:
UpperCamelCase__ = 'swin.' + name
return name
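# Worked example for the mapping above:
#   "layers.0.blocks.0.attn.proj.weight"
#     -> "swin.encoder.layers.0.blocks.0.attention.output.dense.weight"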
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
UpperCamelCase__ = orig_state_dict.pop(SCREAMING_SNAKE_CASE )
if "mask" in key:
continue
elif "qkv" in key:
UpperCamelCase__ = key.split('.' )
UpperCamelCase__ = int(key_split[1] )
UpperCamelCase__ = int(key_split[3] )
UpperCamelCase__ = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
UpperCamelCase__ = val[:dim, :]
UpperCamelCase__ = val[
dim : dim * 2, :
]
UpperCamelCase__ = val[-dim:, :]
else:
UpperCamelCase__ = val[
:dim
]
UpperCamelCase__ = val[
dim : dim * 2
]
UpperCamelCase__ = val[
-dim:
]
else:
UpperCamelCase__ = val
return orig_state_dict
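# Standalone sketch of the fused-qkv split above: timm stores a single
# (3 * dim, dim) attention projection, while the HF model expects separate
# query/key/value slices (shapes illustrative):
#
#   import torch
#   dim = 4
#   qkv = torch.randn(3 * dim, dim)
#   q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
#   assert torch.equal(torch.cat([q, k, v], dim=0), qkv)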
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
UpperCamelCase__ = timm.create_model(SCREAMING_SNAKE_CASE , pretrained=SCREAMING_SNAKE_CASE )
timm_model.eval()
UpperCamelCase__ = get_swin_config(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = SwinForImageClassification(SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase__ = convert_state_dict(timm_model.state_dict() , SCREAMING_SNAKE_CASE )
model.load_state_dict(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
UpperCamelCase__ = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-' ) ) )
UpperCamelCase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
UpperCamelCase__ = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='pt' )
UpperCamelCase__ = timm_model(inputs['pixel_values'] )
UpperCamelCase__ = model(**SCREAMING_SNAKE_CASE ).logits
assert torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-3 )
print(F'Saving model {swin_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
A__ : Optional[Any]= argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
A__ : Tuple= parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
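# Example invocation (the script filename and output path are assumptions):
#
#   python convert_swin_timm_to_pytorch.py \
#       --swin_name swin_tiny_patch4_window7_224 \
#       --pytorch_dump_folder_path ./swin-tiny-patch4-window7-224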
| 20 | 1 |
"""simple docstring"""
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
A__ : Dict= logging.getLogger(__name__)
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ = np.argmax(SCREAMING_SNAKE_CASE , axis=1 )
return np.sum(outputs == labels )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
with open(SCREAMING_SNAKE_CASE , encoding='utf_8' ) as f:
UpperCamelCase__ = csv.reader(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = []
next(SCREAMING_SNAKE_CASE ) # skip the first line
for line in tqdm(SCREAMING_SNAKE_CASE ):
output.append((' '.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
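# Expected CSV row layout, reconstructed from the indexing above: line[1:5] are
# the four story sentences, line[5] and line[6] the two candidate endings, and
# line[-1] the 1-indexed label of the correct ending.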
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ = []
for dataset in encoded_datasets:
UpperCamelCase__ = len(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = np.zeros((n_batch, 2, input_len) , dtype=np.intaa )
UpperCamelCase__ = np.zeros((n_batch, 2) , dtype=np.intaa )
UpperCamelCase__ = np.full((n_batch, 2, input_len) , fill_value=-1_00 , dtype=np.intaa )
UpperCamelCase__ = np.zeros((n_batch,) , dtype=np.intaa )
        for i, (story, conta, contb, mc_label) in enumerate(SCREAMING_SNAKE_CASE ):
            UpperCamelCase__ = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
            UpperCamelCase__ = [start_token] + story[:cap_length] + [delimiter_token] + contb[:cap_length] + [clf_token]
            UpperCamelCase__ = with_conta
            UpperCamelCase__ = with_contb
            UpperCamelCase__ = len(with_conta ) - 1
            UpperCamelCase__ = len(with_contb ) - 1
            UpperCamelCase__ = with_conta
            UpperCamelCase__ = with_contb
            UpperCamelCase__ = mc_label
UpperCamelCase__ = mc_label
UpperCamelCase__ = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(SCREAMING_SNAKE_CASE ) for t in all_inputs ) )
return tensor_datasets
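# Tensor layout produced above, with one row per candidate ending c in {0, 1}:
#   input_ids[i, c]    = [_start_] story [_delimiter_] ending_c [_classify_] + padding
#   mc_token_ids[i, c] = position of the _classify_ token in that row
#   lm_labels[i, c]    mirrors input_ids where tokens exist and stays -100 on padding
#   mc_labels[i]       = index of the correct ending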
def lowerCAmelCase_( ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument('--model_name' , type=SCREAMING_SNAKE_CASE , default='openai-gpt' , help='pretrained model name' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_eval' , action='store_true' , help='Whether to run eval on the dev set.' )
parser.add_argument(
'--output_dir' , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument('--train_dataset' , type=SCREAMING_SNAKE_CASE , default='' )
parser.add_argument('--eval_dataset' , type=SCREAMING_SNAKE_CASE , default='' )
parser.add_argument('--seed' , type=SCREAMING_SNAKE_CASE , default=42 )
parser.add_argument('--num_train_epochs' , type=SCREAMING_SNAKE_CASE , default=3 )
parser.add_argument('--train_batch_size' , type=SCREAMING_SNAKE_CASE , default=8 )
parser.add_argument('--eval_batch_size' , type=SCREAMING_SNAKE_CASE , default=16 )
parser.add_argument('--adam_epsilon' , default=1E-8 , type=SCREAMING_SNAKE_CASE , help='Epsilon for Adam optimizer.' )
parser.add_argument('--max_grad_norm' , type=SCREAMING_SNAKE_CASE , default=1 )
parser.add_argument(
'--max_steps' , default=-1 , type=SCREAMING_SNAKE_CASE , help=(
'If > 0: set total number of training steps to perform. Override num_train_epochs.'
) , )
parser.add_argument(
'--gradient_accumulation_steps' , type=SCREAMING_SNAKE_CASE , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--learning_rate' , type=SCREAMING_SNAKE_CASE , default=6.25E-5 )
parser.add_argument('--warmup_steps' , default=0 , type=SCREAMING_SNAKE_CASE , help='Linear warmup over warmup_steps.' )
parser.add_argument('--lr_schedule' , type=SCREAMING_SNAKE_CASE , default='warmup_linear' )
parser.add_argument('--weight_decay' , type=SCREAMING_SNAKE_CASE , default=0.01 )
parser.add_argument('--lm_coef' , type=SCREAMING_SNAKE_CASE , default=0.9 )
parser.add_argument('--n_valid' , type=SCREAMING_SNAKE_CASE , default=3_74 )
parser.add_argument('--server_ip' , type=SCREAMING_SNAKE_CASE , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=SCREAMING_SNAKE_CASE , default='' , help='Can be used for distant debugging.' )
UpperCamelCase__ = parser.parse_args()
print(SCREAMING_SNAKE_CASE )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=SCREAMING_SNAKE_CASE )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
UpperCamelCase__ = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
UpperCamelCase__ = torch.cuda.device_count()
logger.info('device: {}, n_gpu {}'.format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
if not args.do_train and not args.do_eval:
raise ValueError('At least one of `do_train` or `do_eval` must be True.' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
UpperCamelCase__ = ['_start_', '_delimiter_', '_classify_']
UpperCamelCase__ = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(SCREAMING_SNAKE_CASE ) )
model.to(SCREAMING_SNAKE_CASE )
# Load and encode the datasets
def tokenize_and_encode(SCREAMING_SNAKE_CASE ):
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(SCREAMING_SNAKE_CASE ) )
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return obj
return [tokenize_and_encode(SCREAMING_SNAKE_CASE ) for o in obj]
logger.info('Encoding dataset...' )
UpperCamelCase__ = load_rocstories_dataset(args.train_dataset )
UpperCamelCase__ = load_rocstories_dataset(args.eval_dataset )
UpperCamelCase__ = (train_dataset, eval_dataset)
UpperCamelCase__ = tokenize_and_encode(SCREAMING_SNAKE_CASE )
# Compute the max input length for the Transformer
UpperCamelCase__ = model.config.n_positions // 2 - 2
    UpperCamelCase__ = max(
        len(story[:max_length] ) + max(len(conta[:max_length] ) , len(contb[:max_length] ) ) + 3
        for dataset in encoded_datasets
        for story, conta, contb, _ in dataset )
UpperCamelCase__ = min(SCREAMING_SNAKE_CASE , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
UpperCamelCase__ = pre_process_datasets(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE )
UpperCamelCase__ , UpperCamelCase__ = tensor_datasets[0], tensor_datasets[1]
UpperCamelCase__ = TensorDataset(*SCREAMING_SNAKE_CASE )
UpperCamelCase__ = RandomSampler(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = DataLoader(SCREAMING_SNAKE_CASE , sampler=SCREAMING_SNAKE_CASE , batch_size=args.train_batch_size )
UpperCamelCase__ = TensorDataset(*SCREAMING_SNAKE_CASE )
UpperCamelCase__ = SequentialSampler(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = DataLoader(SCREAMING_SNAKE_CASE , sampler=SCREAMING_SNAKE_CASE , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
UpperCamelCase__ = args.max_steps
UpperCamelCase__ = args.max_steps // (len(SCREAMING_SNAKE_CASE ) // args.gradient_accumulation_steps) + 1
else:
UpperCamelCase__ = len(SCREAMING_SNAKE_CASE ) // args.gradient_accumulation_steps * args.num_train_epochs
UpperCamelCase__ = list(model.named_parameters() )
UpperCamelCase__ = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
UpperCamelCase__ = [
{
'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'weight_decay': args.weight_decay,
},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0},
]
UpperCamelCase__ = AdamW(SCREAMING_SNAKE_CASE , lr=args.learning_rate , eps=args.adam_epsilon )
UpperCamelCase__ = get_linear_schedule_with_warmup(
SCREAMING_SNAKE_CASE , num_warmup_steps=args.warmup_steps , num_training_steps=SCREAMING_SNAKE_CASE )
if args.do_train:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc='Epoch' ):
UpperCamelCase__ = 0
UpperCamelCase__ = 0
UpperCamelCase__ = tqdm(SCREAMING_SNAKE_CASE , desc='Training' )
for step, batch in enumerate(SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = tuple(t.to(SCREAMING_SNAKE_CASE ) for t in batch )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = batch
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE , mc_token_ids=SCREAMING_SNAKE_CASE , lm_labels=SCREAMING_SNAKE_CASE , mc_labels=SCREAMING_SNAKE_CASE )
UpperCamelCase__ = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
UpperCamelCase__ = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
UpperCamelCase__ = 'Training loss: {:.2e} lr: {:.2e}'.format(SCREAMING_SNAKE_CASE , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
UpperCamelCase__ = model.module if hasattr(SCREAMING_SNAKE_CASE , 'module' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
UpperCamelCase__ = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE )
UpperCamelCase__ = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE )
torch.save(model_to_save.state_dict() , SCREAMING_SNAKE_CASE )
model_to_save.config.to_json_file(SCREAMING_SNAKE_CASE )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
UpperCamelCase__ = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
UpperCamelCase__ = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(SCREAMING_SNAKE_CASE )
if args.do_eval:
model.eval()
UpperCamelCase__ , UpperCamelCase__ = 0, 0
UpperCamelCase__ , UpperCamelCase__ = 0, 0
for batch in tqdm(SCREAMING_SNAKE_CASE , desc='Evaluating' ):
UpperCamelCase__ = tuple(t.to(SCREAMING_SNAKE_CASE ) for t in batch )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = batch
with torch.no_grad():
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = model(
SCREAMING_SNAKE_CASE , mc_token_ids=SCREAMING_SNAKE_CASE , lm_labels=SCREAMING_SNAKE_CASE , mc_labels=SCREAMING_SNAKE_CASE )
UpperCamelCase__ = mc_logits.detach().cpu().numpy()
UpperCamelCase__ = mc_labels.to('cpu' ).numpy()
UpperCamelCase__ = accuracy(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
UpperCamelCase__ = eval_loss / nb_eval_steps
UpperCamelCase__ = eval_accuracy / nb_eval_examples
UpperCamelCase__ = tr_loss / nb_tr_steps if args.do_train else None
UpperCamelCase__ = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}
UpperCamelCase__ = os.path.join(args.output_dir , 'eval_results.txt' )
with open(SCREAMING_SNAKE_CASE , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key in sorted(result.keys() ):
logger.info(' %s = %s' , SCREAMING_SNAKE_CASE , str(result[key] ) )
writer.write('%s = %s\n' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 20 |
"""simple docstring"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> bool:
"""simple docstring"""
UpperCamelCase__ = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
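# Sanity check: the pentagonal numbers P(1)=1, P(2)=5 and P(3)=12 pass the test
# above, while 2 and 6 fail it, since k = (1 + sqrt(1 + 24n)) / 6 must come out
# as an integer.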
def lowerCAmelCase_( SCREAMING_SNAKE_CASE = 50_00 ) -> int:
"""simple docstring"""
UpperCamelCase__ = [(i * (3 * i - 1)) // 2 for i in range(1 , SCREAMING_SNAKE_CASE )]
for i, pentagonal_i in enumerate(SCREAMING_SNAKE_CASE ):
for j in range(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) ):
UpperCamelCase__ = pentagonal_nums[j]
UpperCamelCase__ = pentagonal_i + pentagonal_j
UpperCamelCase__ = pentagonal_j - pentagonal_i
if is_pentagonal(SCREAMING_SNAKE_CASE ) and is_pentagonal(SCREAMING_SNAKE_CASE ):
return b
return -1
if __name__ == "__main__":
print(F"""{solution() = }""")
| 20 | 1 |
"""simple docstring"""
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
return EnvironmentCommand()
class __lowerCamelCase ( _a ):
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> List[str]:
UpperCamelCase__ = parser.add_parser('env' )
download_parser.set_defaults(func=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ = huggingface_hub.__version__
UpperCamelCase__ = 'not installed'
UpperCamelCase__ = 'NA'
if is_torch_available():
import torch
UpperCamelCase__ = torch.__version__
UpperCamelCase__ = torch.cuda.is_available()
UpperCamelCase__ = 'not installed'
if is_transformers_available():
import transformers
UpperCamelCase__ = transformers.__version__
UpperCamelCase__ = 'not installed'
if is_accelerate_available():
import accelerate
UpperCamelCase__ = accelerate.__version__
UpperCamelCase__ = 'not installed'
if is_xformers_available():
import xformers
UpperCamelCase__ = xformers.__version__
UpperCamelCase__ = {
'`diffusers` version': version,
'Platform': platform.platform(),
'Python version': platform.python_version(),
'PyTorch version (GPU?)': F'{pt_version} ({pt_cuda_available})',
'Huggingface_hub version': hub_version,
'Transformers version': transformers_version,
'Accelerate version': accelerate_version,
'xFormers version': xformers_version,
'Using GPU in script?': '<fill in>',
'Using distributed or parallel set-up in script?': '<fill in>',
}
print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' )
print(self.format_dict(snake_case_ ) )
return info
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> int:
return "\n".join([F'- {prop}: {val}' for prop, val in d.items()] ) + "\n"
| 20 |
"""simple docstring"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE = 50_00_00_00 ) -> int:
"""simple docstring"""
UpperCamelCase__ = set()
UpperCamelCase__ = int((limit - 24) ** (1 / 2) )
UpperCamelCase__ = set(range(3 , prime_square_limit + 1 , 2 ) )
primes.add(2 )
for p in range(3 , prime_square_limit + 1 , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , prime_square_limit + 1 , SCREAMING_SNAKE_CASE ) ) )
for primea in primes:
UpperCamelCase__ = primea * primea
for primea in primes:
UpperCamelCase__ = primea * primea * primea
if square + cube >= limit - 16:
break
for primea in primes:
UpperCamelCase__ = primea * primea * primea * primea
UpperCamelCase__ = square + cube + tetr
if total >= limit:
break
ret.add(SCREAMING_SNAKE_CASE )
return len(SCREAMING_SNAKE_CASE )
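# Standalone sketch of the set-based sieve used above (limit illustrative):
#
#   limit = 30
#   primes = {2} | set(range(3, limit + 1, 2))
#   for p in range(3, int(limit ** 0.5) + 1, 2):
#       if p in primes:
#           primes.difference_update(range(p * p, limit + 1, p))
#   # primes == {2, 3, 5, 7, 11, 13, 17, 19, 23, 29}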
if __name__ == "__main__":
print(F"""{solution() = }""")
| 20 | 1 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
UpperCamelCase__ = AutoModelForSeqaSeqLM.from_pretrained('google/mt5-small' , return_dict=snake_case_ ).to(snake_case_ )
UpperCamelCase__ = AutoTokenizer.from_pretrained('google/mt5-small' )
UpperCamelCase__ = tokenizer('Hello there' , return_tensors='pt' ).input_ids
UpperCamelCase__ = tokenizer('Hi I am' , return_tensors='pt' ).input_ids
UpperCamelCase__ = model(input_ids.to(snake_case_ ) , labels=labels.to(snake_case_ ) ).loss
UpperCamelCase__ = -(labels.shape[-1] * loss.item())
UpperCamelCase__ = -84.9_127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
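        # Note on the score above: `loss` is the mean cross-entropy per label token,
        # so multiplying by the number of label positions and negating recovers the
        # total sequence log-likelihood that the expected reference score targets.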
| 20 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
A__ : List[Any]= ["""bert-base-uncased""", """bert-base-cased"""]
A__ : Optional[int]= """hf-internal-testing/tiny-bert-tf-only"""
if is_tf_available():
class __lowerCamelCase ( tf.keras.Model ):
def __init__( self , snake_case_ ) -> Optional[int]:
super().__init__()
UpperCamelCase__ = tokenizer
UpperCamelCase__ = AutoConfig.from_pretrained(snake_case_ )
UpperCamelCase__ = TFAutoModel.from_config(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> int:
UpperCamelCase__ = self.tokenizer(snake_case_ )
UpperCamelCase__ = self.bert(**snake_case_ )
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class __lowerCamelCase ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
super().setUp()
UpperCamelCase__ = [
BertTokenizer.from_pretrained(snake_case_ ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
UpperCamelCase__ = [TFBertTokenizer.from_pretrained(snake_case_ ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(snake_case_ , use_fast_bert_tokenizer=snake_case_ )
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
UpperCamelCase__ = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
UpperCamelCase__ = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in (self.test_sentences, self.paired_sentences):
UpperCamelCase__ = tokenizer(snake_case_ , return_tensors='tf' , padding='longest' )
UpperCamelCase__ = tf_tokenizer(snake_case_ )
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
for tf_tokenizer in self.tf_tokenizers:
UpperCamelCase__ = tf_tokenizer(self.paired_sentences )
UpperCamelCase__ = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
for tf_tokenizer in self.tf_tokenizers:
UpperCamelCase__ = tf.function(snake_case_ )
for test_inputs in (self.test_sentences, self.paired_sentences):
UpperCamelCase__ = tf.constant(snake_case_ )
UpperCamelCase__ = compiled_tokenizer(snake_case_ )
UpperCamelCase__ = tf_tokenizer(snake_case_ )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
for tf_tokenizer in self.tf_tokenizers:
UpperCamelCase__ = ModelToSave(tokenizer=snake_case_ )
UpperCamelCase__ = tf.convert_to_tensor(self.test_sentences )
UpperCamelCase__ = model(snake_case_ ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
UpperCamelCase__ = Path(snake_case_ ) / 'saved.model'
model.save(snake_case_ )
UpperCamelCase__ = tf.keras.models.load_model(snake_case_ )
UpperCamelCase__ = loaded_model(snake_case_ )
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1E-5 )
| 20 | 1 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class __lowerCamelCase ( _a ):
def __init__( self , snake_case_ , snake_case_ ) -> Optional[int]:
UpperCamelCase__ = params
UpperCamelCase__ = np.array(snake_case_ )
UpperCamelCase__ = np.array([len(snake_case_ ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , snake_case_ ) -> Tuple:
return (self.token_ids[index], self.lengths[index])
def __len__( self ) -> Dict:
return len(self.lengths )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = self.params.max_model_input_size
UpperCamelCase__ = self.lengths > max_len
logger.info(F'Splitting {sum(snake_case_ )} too long sequences.' )
def divide_chunks(snake_case_ , snake_case_ ):
return [l[i : i + n] for i in range(0 , len(snake_case_ ) , snake_case_ )]
UpperCamelCase__ = []
UpperCamelCase__ = []
if self.params.mlm:
UpperCamelCase__ , UpperCamelCase__ = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token']
else:
UpperCamelCase__ , UpperCamelCase__ = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token']
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
UpperCamelCase__ = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
UpperCamelCase__ = np.insert(snake_case_ , 0 , snake_case_ )
if sub_s[-1] != sep_id:
UpperCamelCase__ = np.insert(snake_case_ , len(snake_case_ ) , snake_case_ )
assert len(snake_case_ ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(snake_case_ )
new_tok_ids.extend(snake_case_ )
new_lengths.extend([len(snake_case_ ) for l in sub_seqs] )
UpperCamelCase__ = np.array(snake_case_ )
UpperCamelCase__ = np.array(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = len(self )
UpperCamelCase__ = self.lengths > 11
UpperCamelCase__ = self.token_ids[indices]
UpperCamelCase__ = self.lengths[indices]
UpperCamelCase__ = len(self )
logger.info(F'Remove {init_size - new_size} too short (<=11 tokens) sequences.' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
if "unk_token" not in self.params.special_tok_ids:
return
else:
UpperCamelCase__ = self.params.special_tok_ids['unk_token']
UpperCamelCase__ = len(self )
UpperCamelCase__ = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
UpperCamelCase__ = (unk_occs / self.lengths) < 0.5
UpperCamelCase__ = self.token_ids[indices]
UpperCamelCase__ = self.lengths[indices]
UpperCamelCase__ = len(self )
logger.info(F'Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
if not self.params.is_master:
return
logger.info(F'{len(self )} sequences' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> str:
UpperCamelCase__ = [t[0] for t in batch]
UpperCamelCase__ = [t[1] for t in batch]
assert len(snake_case_ ) == len(snake_case_ )
# Max for paddings
UpperCamelCase__ = max(snake_case_ )
# Pad token ids
if self.params.mlm:
UpperCamelCase__ = self.params.special_tok_ids['pad_token']
else:
UpperCamelCase__ = self.params.special_tok_ids['unk_token']
UpperCamelCase__ = [list(t.astype(snake_case_ ) ) + [pad_idx] * (max_seq_len_ - len(snake_case_ )) for t in token_ids]
assert len(tk_ ) == len(snake_case_ )
assert all(len(snake_case_ ) == max_seq_len_ for t in tk_ )
UpperCamelCase__ = torch.tensor(tk_ ) # (bs, max_seq_len_)
UpperCamelCase__ = torch.tensor(snake_case_ ) # (bs)
return tk_t, lg_t
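# Standalone sketch of the padding performed in the collate method above
# (values illustrative):
#
#   import torch
#   seqs, pad_idx = [[5, 6, 7], [8, 9]], 0
#   max_len = max(len(s) for s in seqs)
#   padded = torch.tensor([s + [pad_idx] * (max_len - len(s)) for s in seqs])
#   lengths = torch.tensor([len(s) for s in seqs])
#   # padded -> [[5, 6, 7], [8, 9, 0]], lengths -> [3, 2]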
| 20 |
"""simple docstring"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> list[int]:
"""simple docstring"""
UpperCamelCase__ = len(SCREAMING_SNAKE_CASE )
for i in range(SCREAMING_SNAKE_CASE ):
for j in range(i + 1 , SCREAMING_SNAKE_CASE ):
if numbers[j] < numbers[i]:
UpperCamelCase__ , UpperCamelCase__ = numbers[j], numbers[i]
return numbers
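# Worked trace for [3, 1, 2]: the i=0 pass swaps 3 and 1 -> [1, 3, 2]; the i=1
# pass swaps 3 and 2 -> [1, 2, 3]. Each pass leaves the minimum of the remaining
# suffix at position i.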
if __name__ == "__main__":
A__ : Union[str, Any]= input("""Enter numbers separated by a comma:\n""").strip()
A__ : List[Any]= [int(item) for item in user_input.split(""",""")]
print(exchange_sort(unsorted))
| 20 | 1 |
"""simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 20 |
"""simple docstring"""
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
A__ : str= {
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
A__ : str= {
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
UpperCamelCase__ = (images / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase__ = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
UpperCamelCase__ = numpy_to_pil(SCREAMING_SNAKE_CASE )
return images
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
if images.ndim == 3:
UpperCamelCase__ = images[None, ...]
UpperCamelCase__ = (images * 2_55).round().astype('uint8' )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
UpperCamelCase__ = [Image.fromarray(image.squeeze() , mode='L' ) for image in images]
else:
UpperCamelCase__ = [Image.fromarray(SCREAMING_SNAKE_CASE ) for image in images]
return pil_images
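# Minimal sketch of the numpy -> PIL path above (shapes illustrative):
#
#   import numpy as np
#   batch = np.random.rand(2, 8, 8, 3).astype(np.float32)   # (N, H, W, C) in [0, 1]
#   pils = [Image.fromarray((img * 255).round().astype("uint8")) for img in batch]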
| 20 | 1 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class __lowerCamelCase :
def __init__( self , snake_case_ , snake_case_=14 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=False , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=4 , snake_case_=4 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=0.02 , ) -> Optional[int]:
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = seq_length
UpperCamelCase__ = is_training
UpperCamelCase__ = use_input_mask
UpperCamelCase__ = use_token_type_ids
UpperCamelCase__ = use_labels
UpperCamelCase__ = vocab_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = rotary_dim
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = initializer_range
UpperCamelCase__ = None
UpperCamelCase__ = vocab_size - 1
UpperCamelCase__ = vocab_size - 1
UpperCamelCase__ = vocab_size - 1
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ = None
if self.use_input_mask:
UpperCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=snake_case_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = config_and_inputs
UpperCamelCase__ = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[str]:
UpperCamelCase__ = 20
UpperCamelCase__ = model_class_name(snake_case_ )
UpperCamelCase__ = model.init_cache(input_ids.shape[0] , snake_case_ )
UpperCamelCase__ = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' )
UpperCamelCase__ = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
UpperCamelCase__ = model(
input_ids[:, :-1] , attention_mask=snake_case_ , past_key_values=snake_case_ , position_ids=snake_case_ , )
UpperCamelCase__ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
UpperCamelCase__ = model(
input_ids[:, -1:] , attention_mask=snake_case_ , past_key_values=outputs_cache.past_key_values , position_ids=snake_case_ , )
UpperCamelCase__ = model(snake_case_ )
UpperCamelCase__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'Max diff is {diff}' )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Optional[Any]:
UpperCamelCase__ = 20
UpperCamelCase__ = model_class_name(snake_case_ )
UpperCamelCase__ = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
UpperCamelCase__ = model.init_cache(input_ids.shape[0] , snake_case_ )
UpperCamelCase__ = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
UpperCamelCase__ = model(
input_ids[:, :-1] , attention_mask=snake_case_ , past_key_values=snake_case_ , position_ids=snake_case_ , )
UpperCamelCase__ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
UpperCamelCase__ = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=snake_case_ , position_ids=snake_case_ , )
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ )
UpperCamelCase__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'Max diff is {diff}' )
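# The two checks above exercise incremental decoding: the prompt is run once with
# a pre-allocated KV cache, then a single step is taken on the final token, and
# the logits must match an uncached full forward pass to within 1e-3.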
@require_flax
class __lowerCamelCase ( _a , _a , unittest.TestCase ):
a : Optional[Any] =(FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
a : List[Any] =(FlaxGPTJForCausalLM,) if is_flax_available() else ()
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = FlaxGPTJModelTester(self )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name , config , input_ids , attention_mask )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name , config , input_ids , attention_mask )
@tooslow
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
        tokenizer = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' )
        inputs = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=True , truncation=True )
        model = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' )
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id
        jit_generate = jax.jit(model.generate )
        output_sequences = jit_generate(
            inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences
        output_string = tokenizer.batch_decode(output_sequences , skip_special_tokens=True )
        expected_string = [
            'Hello this is a long string of text.\n\nI\'m trying to get the text of the',
            'Hey, I\'m a little late to the party. I\'m going to',
        ]
        self.assertListEqual(output_string , expected_string )
@is_pt_flax_cross_test
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
UpperCamelCase__ = self._prepare_for_class(snake_case_ , snake_case_ )
UpperCamelCase__ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
UpperCamelCase__ = model_class.__name__[4:] # Skip the "Flax" at the beginning
UpperCamelCase__ = getattr(snake_case_ , snake_case_ )
UpperCamelCase__ , UpperCamelCase__ = pt_inputs['input_ids'].shape
UpperCamelCase__ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(snake_case_ ):
UpperCamelCase__ = 0
UpperCamelCase__ = 1
UpperCamelCase__ = 0
UpperCamelCase__ = 1
UpperCamelCase__ = pt_model_class(snake_case_ ).eval()
UpperCamelCase__ = model_class(snake_case_ , dtype=jnp.floataa )
UpperCamelCase__ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , snake_case_ )
UpperCamelCase__ = fx_state
with torch.no_grad():
UpperCamelCase__ = pt_model(**snake_case_ ).to_tuple()
UpperCamelCase__ = fx_model(**snake_case_ ).to_tuple()
self.assertEqual(len(snake_case_ ) , len(snake_case_ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(snake_case_ , snake_case_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(snake_case_ )
UpperCamelCase__ = model_class.from_pretrained(snake_case_ , from_pt=snake_case_ )
UpperCamelCase__ = fx_model_loaded(**snake_case_ ).to_tuple()
self.assertEqual(
len(snake_case_ ) , len(snake_case_ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(snake_case_ , snake_case_ ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
UpperCamelCase__ = self._prepare_for_class(snake_case_ , snake_case_ )
UpperCamelCase__ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
UpperCamelCase__ = model_class.__name__[4:] # Skip the "Flax" at the beginning
UpperCamelCase__ = getattr(snake_case_ , snake_case_ )
UpperCamelCase__ = pt_model_class(snake_case_ ).eval()
UpperCamelCase__ = model_class(snake_case_ , dtype=jnp.floataa )
UpperCamelCase__ = load_flax_weights_in_pytorch_model(snake_case_ , fx_model.params )
UpperCamelCase__ , UpperCamelCase__ = pt_inputs['input_ids'].shape
UpperCamelCase__ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(snake_case_ ):
UpperCamelCase__ = 0
UpperCamelCase__ = 1
UpperCamelCase__ = 0
UpperCamelCase__ = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
UpperCamelCase__ = pt_model(**snake_case_ ).to_tuple()
UpperCamelCase__ = fx_model(**snake_case_ ).to_tuple()
self.assertEqual(len(snake_case_ ) , len(snake_case_ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(snake_case_ , snake_case_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(snake_case_ )
UpperCamelCase__ = pt_model_class.from_pretrained(snake_case_ , from_flax=snake_case_ )
with torch.no_grad():
UpperCamelCase__ = pt_model_loaded(**snake_case_ ).to_tuple()
self.assertEqual(
len(snake_case_ ) , len(snake_case_ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(snake_case_ , snake_case_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
| 20 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
A__ : Dict= logging.get_logger(__name__)
A__ : str= {"""vocab_file""": """spiece.model"""}
A__ : Union[str, Any]= {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
}
}
# TODO(PVP) - this should be removed in Transformers v5
A__ : Union[str, Any]= {
"""t5-small""": 5_12,
"""t5-base""": 5_12,
"""t5-large""": 5_12,
"""t5-3b""": 5_12,
"""t5-11b""": 5_12,
}
A__ : Optional[Any]= """▁"""
class __lowerCamelCase ( _a ):
a : Dict =VOCAB_FILES_NAMES
a : str =PRETRAINED_VOCAB_FILES_MAP
a : Union[str, Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : List[str] =["""input_ids""", """attention_mask"""]
def __init__( self , snake_case_ , snake_case_="</s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_=100 , snake_case_=None , snake_case_ = None , snake_case_=True , **snake_case_ , ) -> None:
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
UpperCamelCase__ = [F'<extra_id_{i}>' for i in range(snake_case_ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
UpperCamelCase__ = len(set(filter(lambda snake_case_ : bool('extra_id' in str(snake_case_ ) ) , snake_case_ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
' tokens' )
if legacy:
logger.warning_once(
F'You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'
' read the related pull request available at https://github.com/huggingface/transformers/pull/24565' )
UpperCamelCase__ = legacy
UpperCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , extra_ids=snake_case_ , additional_special_tokens=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , legacy=snake_case_ , **snake_case_ , )
UpperCamelCase__ = vocab_file
UpperCamelCase__ = extra_ids
UpperCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case_ )
@staticmethod
    def SCREAMING_SNAKE_CASE__ ( pretrained_model_name_or_path , max_model_length , init_max_model_length ) -> Optional[Any]:
        if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'This tokenizer was incorrectly instantiated with a model max length of'
F' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
F' {pretrained_model_name_or_path} automatically truncating your input to'
F' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
F' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
                ' instantiate this tokenizer with `model_max_length` set to your preferred value.' , FutureWarning , )
return max_model_length
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
return self.sp_model.get_piece_size() + self._extra_ids
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
    def SCREAMING_SNAKE_CASE__ ( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0 )) + [1]
        return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
    def get_sentinel_tokens( self ) -> List[Any]:
return list(
set(filter(lambda snake_case_ : bool(re.search(r'<extra_id_\d+>' , snake_case_ ) ) is not None , self.additional_special_tokens ) ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
        return [self._convert_token_to_id(token ) for token in self.get_sentinel_tokens()]
    def _add_eos_if_not_present( self , token_ids ) -> List[int]:
        if len(token_ids ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
' eos tokens being added.' )
return token_ids
else:
return token_ids + [self.eos_token_id]
    def SCREAMING_SNAKE_CASE__ ( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos ) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos ) * [0]
    def SCREAMING_SNAKE_CASE__ ( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0 )
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1 )
            return token_ids_0 + token_ids_1
def __getstate__( self ) -> str:
UpperCamelCase__ = self.__dict__.copy()
UpperCamelCase__ = None
return state
def __setstate__( self , snake_case_ ) -> Any:
UpperCamelCase__ = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
UpperCamelCase__ = {}
UpperCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , **snake_case_ ) -> List[str]:
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
UpperCamelCase__ = SPIECE_UNDERLINE + text.replace(snake_case_ , ' ' )
return super().tokenize(snake_case_ , **snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , **snake_case_ ) -> List[Any]:
if not self.legacy:
UpperCamelCase__ = text.startswith(snake_case_ )
if is_first:
UpperCamelCase__ = text[1:]
UpperCamelCase__ = self.sp_model.encode(snake_case_ , out_type=snake_case_ )
if not self.legacy and not is_first and not text.startswith(' ' ) and tokens[0].startswith(snake_case_ ):
UpperCamelCase__ = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
    def _convert_token_to_id( self , token ) -> List[str]:
        if token.startswith('<extra_id_' ):
            match = re.match(r'<extra_id_(\d+)>' , token )
            num = int(match.group(1 ) )
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Optional[int]:
if index < self.sp_model.get_piece_size():
UpperCamelCase__ = self.sp_model.IdToPiece(snake_case_ )
else:
UpperCamelCase__ = F'<extra_id_{self.vocab_size - 1 - index}>'
return token
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> List[str]:
UpperCamelCase__ = []
UpperCamelCase__ = ''
UpperCamelCase__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(snake_case_ ) + token
UpperCamelCase__ = True
UpperCamelCase__ = []
else:
current_sub_tokens.append(snake_case_ )
UpperCamelCase__ = False
out_string += self.sp_model.decode(snake_case_ )
return out_string.strip()
    def SCREAMING_SNAKE_CASE__ ( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , 'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
| 20 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCamelCase ( _a , unittest.TestCase ):
a : Dict =DebertaTokenizer
a : Optional[int] =True
a : Optional[Any] =DebertaTokenizerFast
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'[UNK]',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '[UNK]'}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(merges ) )
    def SCREAMING_SNAKE_CASE__ ( self , **kwargs ) -> List[str]:
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Optional[int]:
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
        tokenizer = self.get_tokenizer()
        text = 'lower newer'
        bpe_tokens = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
        tokenizer = self.get_tokenizer()
        tokd = tokenizer('Hello' , 'World' )
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd['token_type_ids'] , expected_token_type_ids )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
        tokenizer = self.tokenizer_class.from_pretrained('microsoft/deberta-base' )
        text = tokenizer.encode('sequence builders' , add_special_tokens=False )
        text_a = tokenizer.encode('multi-sequence build' , add_special_tokens=False )
        encoded_text_from_decode = tokenizer.encode(
            'sequence builders' , add_special_tokens=True , add_prefix_space=False )
        encoded_pair_from_decode = tokenizer.encode(
            'sequence builders' , 'multi-sequence build' , add_special_tokens=True , add_prefix_space=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
        tokenizer_classes = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained('microsoft/deberta-base' )
            sequences = [
                'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
                'ALBERT incorporates two parameter reduction techniques',
                'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
                ' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
                ' vocabulary embedding.',
            ]
            encoding = tokenizer(sequences , padding=True )
            decoded_sequences = [tokenizer.decode(seq , skip_special_tokens=True ) for seq in encoding['input_ids']]
# fmt: off
            expected_encoding = {
'input_ids': [
[1, 2118, 1_1126, 565, 35, 83, 2_5191, 163, 1_8854, 13, 1_2156, 12, 1_6101, 2_5376, 1_3807, 9, 2_2205, 2_7893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 1_1126, 565, 2_4536, 80, 4_3797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 3_3183, 1_1303, 4_3797, 1938, 4, 870, 2_4165, 2_9105, 5, 739, 3_2644, 3_3183, 1_1303, 3_6173, 88, 80, 650, 7821, 4_5940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 1_3171, 31, 5, 1836, 9, 3_2644, 3_3183, 1_1303, 4, 2]
],
'token_type_ids': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequences = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
            self.assertDictEqual(encoding.data , expected_encoding )
            for expected, decoded in zip(expected_decoded_sequences , decoded_sequences ):
                self.assertEqual(expected , decoded )
| 20 |
"""simple docstring"""
from __future__ import annotations
from typing import Generic, TypeVar
A__ : Any= TypeVar("""T""")
class __lowerCamelCase ( Generic[T] ):
    def __init__( self , data ) -> None:
        self.data = data
        self.parent = self
        self.rank = 0
class __lowerCamelCase ( Generic[T] ):
def __init__( self ) -> None:
# map from node name to the node object
        self.map = {}
    def make_set( self , data ) -> None:
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data )
    def find_set( self , data ) -> DisjointSetTreeNode[T]:
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data )
        return elem_ref.parent
    def link( self , nodea , nodeb ) -> None:
        # helper function for union operation
        if nodea.rank > nodeb.rank:
            nodeb.parent = nodea
        else:
            nodea.parent = nodeb
            if nodea.rank == nodeb.rank:
                nodeb.rank += 1
    def union( self , dataa , datab ) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(dataa ) , self.find_set(datab ) )
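    # A minimal usage sketch (illustrative only; the element values are assumed):
    #
    #     ds = DisjointSetTree[int]()
    #     for x in (1, 2, 3):
    #         ds.make_set(x)
    #     ds.union(1, 2)
    #     assert ds.find_set(1) == ds.find_set(2)
    #     assert ds.find_set(1) != ds.find_set(3)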
class __lowerCamelCase ( Generic[T] ):
def __init__( self ) -> None:
# connections: map from the node to the neighbouring nodes (with weights)
        self.connections = {}
    def add_node( self , node ) -> None:
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}
    def add_edge( self , nodea , nodeb , weight ) -> None:
        # add an edge with the given weight
        self.add_node(nodea )
        self.add_node(nodeb )
        self.connections[nodea][nodeb] = weight
        self.connections[nodeb][nodea] = weight
    def SCREAMING_SNAKE_CASE__ ( self ) -> GraphUndirectedWeighted[T]:
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start) )
                    edges.append((start, end, self.connections[start][end]) )
        edges.sort(key=lambda x : x[2] )
        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node )
        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections ) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u )
            parent_v = disjoint_set.find_set(v )
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u , v , w )
                disjoint_set.union(u , v )
        return graph
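# A minimal usage sketch (illustrative only; vertex labels and weights are
# assumed, and the MST-building method keeps its placeholder name from this file):
#
#     g = GraphUndirectedWeighted[int]()
#     g.add_edge(1, 2, 1)
#     g.add_edge(2, 3, 2)
#     g.add_edge(1, 3, 3)
#     mst = g.SCREAMING_SNAKE_CASE__()  # keeps edges (1, 2) and (2, 3)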
| 20 | 1 |
"""simple docstring"""
import sys
from collections import defaultdict
class __lowerCamelCase :
def __init__( self ) -> Tuple:
UpperCamelCase__ = []
    def get_position( self , vertex ) -> List[str]:
        return self.node_position[vertex]
    def set_position( self , vertex , pos ) -> Optional[int]:
        self.node_position[vertex] = pos
    def top_to_bottom( self , heap , start , size , positions ) -> List[Any]:
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, tempa
                temp = self.get_position(positions[smallest_child] )
                self.set_position(
                    positions[smallest_child] , self.get_position(positions[start] ) )
                self.set_position(positions[start] , temp )
                self.top_to_bottom(heap , smallest_child , size , positions )
    def bottom_to_top( self , val , index , heap , position ) -> Any:
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent] , index )
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp , index )
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp , 0 )
    def heapify( self , heap , positions ) -> Any:
        start = len(heap ) // 2 - 1
        for i in range(start , -1 , -1 ):
            self.top_to_bottom(heap , i , len(heap ) , positions )
    def delete_minimum( self , heap , positions ) -> List[Any]:
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap , 0 , len(heap ) , positions )
        return temp
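# The Heap above backs Prim's algorithm below: `distance_tv` is the priority
# queue keyed by the cheapest known edge into the growing tree, while
# `positions` and `node_position` keep the vertex <-> heap-slot mapping in sync.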
def prisms_algorithm( adjacency_list ) -> Tuple:
    """simple docstring"""
    heap = Heap()
    visited = [0] * len(adjacency_list )
    nbr_tv = [-1] * len(adjacency_list )  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list ) ):
        distance_tv.append(sys.maxsize )
        positions.append(vertex )
        heap.node_position.append(vertex )
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv , positions )
    for _ in range(1 , len(adjacency_list ) ):
        vertex = heap.delete_minimum(distance_tv , positions )
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex) )
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor )]
                ):
                    distance_tv[heap.get_position(neighbor )] = distance
                    heap.bottom_to_top(
                        distance , heap.get_position(neighbor ) , distance_tv , positions )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
A__ : Dict= int(input("""Enter number of edges: """).strip())
A__ : Dict= defaultdict(list)
for _ in range(edges_number):
A__ : Dict= [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 20 |
"""simple docstring"""
A__ : Tuple= """Alexander Joslin"""
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm( equation ) -> int:
    """simple docstring"""
    operators = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i ) )
        elif i in operators:
            # RULE 2
            operator_stack.push(i )
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            numbera = operand_stack.peek()
            operand_stack.pop()
            numberb = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](numberb , numbera )
            operand_stack.push(total )
    # RULE 5
    return operand_stack.peek()
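# Worked example for "(5 + ((4 * 2) * (2 + 3)))":
#   1st ")" closes "4 * 2"  -> pops 2 and 4, pushes 8
#   2nd ")" closes "2 + 3"  -> pops 3 and 2, pushes 5
#   3rd ")" closes "8 * 5"  -> pops 5 and 8, pushes 40
#   4th ")" closes "5 + 40" -> pops 40 and 5, pushes 45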
if __name__ == "__main__":
A__ : int= """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 20 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
A__ : int= list[list[float | int]]
def solve( matrix , vector ) -> Matrix:
    """simple docstring"""
    size = len(matrix )
    augmented = [[0 for _ in range(size + 1 )] for _ in range(size )]
    row: int
    col: int
    rowa: int
    cola: int
    pivot_row: int
    ratio: float
    for row in range(size ):
        for col in range(size ):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[rowa][col] ), rowa) for rowa in range(row , size ) )[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        for rowa in range(row + 1 , size ):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1 , size + 1 ):
                augmented[rowa][cola] -= augmented[row][cola] * ratio
        row += 1
        col += 1
    # back substitution
    for col in range(1 , size ):
        for row in range(col ):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col , size + 1 ):
                augmented[row][cola] -= augmented[col][cola] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(size )
    ]
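# Illustrative check (assumed inputs, not part of the original module):
# solve([[1, 0], [0, 1]], [[3], [4]]) performs Gaussian elimination with
# partial pivoting and back substitution, returning [[3.0], [4.0]].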
def interpolate( data_points ) -> Callable[[int], int]:
    """simple docstring"""
    size = len(data_points )
    matrix = [[0 for _ in range(size )] for _ in range(size )]
    vector = [[0] for _ in range(size )]
    x_val: int
    y_val: int
    col: int
    coeffs: Matrix
    for x_val, y_val in enumerate(data_points ):
        for col in range(size ):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(matrix , vector )
    def interpolated_func(var ) -> int:
        return sum(
            round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
            for x_val in range(size ) )
    return interpolated_func
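# interpolate() builds a Vandermonde-style system: row x_val encodes the powers
# of (x_val + 1), so solve() yields the coefficients of the unique polynomial of
# degree size - 1 passing through the first `size` data points.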
def question_function( variable ) -> int:
"""simple docstring"""
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def solution( func = question_function , order = 10 ) -> int:
    """simple docstring"""
    data_points = [func(x_val ) for x_val in range(1 , order + 1 )]
    polynomials = [
        interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
    ]
    ret = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val ) == poly(x_val ):
            x_val += 1
        ret += poly(x_val )
    return ret
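# Project Euler 101: each optimum polynomial fitted to the first k terms of the
# sequence eventually disagrees with the generating function; solution() sums
# the first such incorrect term (FIT) over k = 1..order.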
if __name__ == "__main__":
print(F"""{solution() = }""")
| 20 |
"""simple docstring"""
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
A__ : Any= """src/diffusers"""
# Matches is_xxx_available()
A__ : Tuple= re.compile(r"""is\_([a-z_]*)_available\(\)""")
# Matches from xxx import bla
A__ : Any= re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
A__ : Optional[Any]= """
{0} = None
"""
A__ : List[Any]= """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
"""
A__ : Dict= """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
def find_backend( line ) -> Union[str, Any]:
    """simple docstring"""
    backends = _re_backend.findall(line )
    if len(backends ) == 0:
        return None
    return "_and_".join(backends )
def read_init( ) -> str:
"""simple docstring"""
with open(os.path.join(SCREAMING_SNAKE_CASE , '__init__.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
UpperCamelCase__ = f.readlines()
# Get to the point we do the actual imports for type checking
UpperCamelCase__ = 0
UpperCamelCase__ = {}
# Go through the end of the file
while line_index < len(SCREAMING_SNAKE_CASE ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
UpperCamelCase__ = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith('else:' ):
line_index += 1
line_index += 1
UpperCamelCase__ = []
# Until we unindent, add backend objects to the list
while line_index < len(SCREAMING_SNAKE_CASE ) and len(lines[line_index] ) > 1:
UpperCamelCase__ = lines[line_index]
UpperCamelCase__ = _re_single_line_import.search(SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(SCREAMING_SNAKE_CASE ) > 0:
UpperCamelCase__ = objects
else:
line_index += 1
return backend_specific_objects
def create_dummy_object( name , backend_name ) -> List[Any]:
    """simple docstring"""
    if name.isupper():
        return DUMMY_CONSTANT.format(name )
    elif name.islower():
        return DUMMY_FUNCTION.format(name , backend_name )
    else:
        return DUMMY_CLASS.format(name , backend_name )
def create_dummy_files( backend_specific_objects=None ) -> int:
    """simple docstring"""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}
    for backend, objects in backend_specific_objects.items():
        backend_name = '[' + ', '.join(F'"{b}"' for b in backend.split('_and_' ) ) + ']'
        dummy_file = '# This file is autogenerated by the command `make fix-copies`, do not edit.\n'
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o , backend_name ) for o in objects] )
        dummy_files[backend] = dummy_file
    return dummy_files
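# For example, with backend list '["torch"]' and a class name such as
# "UNet2DModel" (illustrative), DUMMY_CLASS above expands to a stub whose
# __init__/from_config/from_pretrained all call requires_backends(..., ["torch"]).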
def check_dummies( overwrite=False ) -> int:
"""simple docstring"""
UpperCamelCase__ = create_dummy_files()
# For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
UpperCamelCase__ = {'torch': 'pt'}
# Locate actual dummy modules and read their content.
UpperCamelCase__ = os.path.join(SCREAMING_SNAKE_CASE , 'utils' )
UpperCamelCase__ = {
backend: os.path.join(SCREAMING_SNAKE_CASE , F'dummy_{short_names.get(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}_objects.py' )
for backend in dummy_files.keys()
}
UpperCamelCase__ = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(SCREAMING_SNAKE_CASE ):
with open(SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' , newline='\n' ) as f:
UpperCamelCase__ = f.read()
else:
UpperCamelCase__ = ''
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F'Updating diffusers.utils.dummy_{short_names.get(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}_objects.py as the main '
'__init__ has new objects.' )
with open(dummy_file_paths[backend] , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
'The main __init__ has objects that are not present in '
F'diffusers.utils.dummy_{short_names.get(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}_objects.py. Run `make fix-copies` '
'to fix this.' )
if __name__ == "__main__":
A__ : Any= argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
A__ : Optional[int]= parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 20 | 1 |
"""simple docstring"""
def lowerCAmelCase_( number ) -> bool:
"""simple docstring"""
return number & 1 == 0
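# Bitwise AND with 1 isolates the lowest bit, which is 0 exactly for even
# numbers: e.g. 4 & 1 == 0 -> True, while 7 & 1 == 1 -> False.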
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 |
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
A__ : Optional[Any]= """Run commands across TPU VMs for initial setup before running `accelerate launch`."""
def tpu_command_parser( subparsers=None ) -> Dict:
"""simple docstring"""
if subparsers is not None:
UpperCamelCase__ = subparsers.add_parser('tpu-config' , description=_description )
else:
UpperCamelCase__ = argparse.ArgumentParser('Accelerate tpu-config command' , description=_description )
# Core arguments
UpperCamelCase__ = parser.add_argument_group(
'Config Arguments' , 'Arguments that can be configured through `accelerate config`.' )
config_args.add_argument(
        '--config_file' , type=str , default=None , help='Path to the config file to use for accelerate.' , )
config_args.add_argument(
        '--tpu_name' , default=None , help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.' , )
config_args.add_argument(
        '--tpu_zone' , default=None , help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.' , )
UpperCamelCase__ = parser.add_argument_group('TPU Arguments' , 'Arguments for options ran inside the TPU.' )
pod_args.add_argument(
'--use_alpha' , action='store_true' , help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.' , )
pod_args.add_argument(
        '--command_file' , default=None , help='The path to the file containing the commands to run on the pod on startup.' , )
pod_args.add_argument(
'--command' , action='append' , nargs='+' , help='A command to run on the pod. Can be passed multiple times.' , )
pod_args.add_argument(
'--install_accelerate' , action='store_true' , help='Whether to install accelerate on the pod. Defaults to False.' , )
pod_args.add_argument(
'--accelerate_version' , default='latest' , help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.' , )
pod_args.add_argument(
'--debug' , action='store_true' , help='If set, will print the command that would be run instead of running it.' )
if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher )
return parser
def tpu_command_launcher( args ) -> Dict:
    """simple docstring"""
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        defaults = load_config_from_file(args.config_file )
    if not args.command_file and defaults.command_file is not None and not args.command:
        args.command_file = defaults.command_file
    if not args.command and defaults.commands is not None:
        args.command = defaults.commands
    if not args.tpu_name:
        args.tpu_name = defaults.tpu_name
    if not args.tpu_zone:
        args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = 'git+https://github.com/huggingface/accelerate.git'
    elif args.accelerate_version == "latest":
        args.accelerate_version = 'accelerate -U'
    elif isinstance(parse(args.accelerate_version ) , Version ):
        args.accelerate_version = F'accelerate=={args.accelerate_version}'
    if not args.command_file and not args.command:
        raise ValueError('You must specify either a command file or a command to run on the pod.' )
    if args.command_file:
        with open(args.command_file , 'r' ) as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0] , list ):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ['cd /usr/share']
    if args.install_accelerate:
        new_cmd += [F'pip install {args.accelerate_version}']
    new_cmd += args.command
    args.command = '; '.join(new_cmd )
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ['gcloud']
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(F'Running {" ".join(cmd )}' )
        return
    subprocess.run(cmd )
print('Successfully setup pod.' )
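# The assembled invocation has the shape (illustrative):
#   gcloud [alpha] compute tpus tpu-vm ssh <tpu_name> --zone <tpu_zone> \
#       --command "cd /usr/share; pip install accelerate -U; <your commands>" --worker all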
def lowerCAmelCase_( ) -> int:
    """simple docstring"""
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args )
| 20 | 1 |
"""simple docstring"""
import argparse
from collections import defaultdict
def overwrite_file( file , class_name , test_name , correct_line , done_test ) -> str:
    """simple docstring"""
    _id = F'{file}_{class_name}_{test_name}'
    done_test[_id] += 1
    with open(file , 'r' ) as f:
        lines = f.readlines()
    class_regex = F'class {class_name}('
    func_regex = F'{4 * " "}def {test_name}('
    line_begin_regex = F'{8 * " "}{correct_line.split()[0]}'
    another_line_begin_regex = F'{16 * " "}{correct_line.split()[0]}'
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex ):
            in_class = True
        elif in_class and line.startswith(func_regex ):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex ) or line.startswith(another_line_begin_regex )):
            spaces = len(line.split(correct_line.split()[0] )[0] )
            count += 1
            if count == done_test[_id]:
                in_line = True
        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True
        if in_class and in_func and in_line and insert_line:
            new_lines.append(F'{spaces * " "}{correct_line}' )
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line )
    with open(file , 'w' ) as f:
        for line in new_lines:
            f.write(line )
def main( correct , fail=None ) -> str:
    """simple docstring"""
    if fail is not None:
        with open(fail , 'r' ) as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct , 'r' ) as f:
        correct_lines = f.readlines()
    done_tests = defaultdict(int )
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(';' )
        if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
            overwrite_file(file , class_name , test_name , correct_line , done_tests )
if __name__ == "__main__":
A__ : str= argparse.ArgumentParser()
parser.add_argument("""--correct_filename""", help="""filename of tests with expected result""")
parser.add_argument("""--fail_filename""", help="""filename of test failures""", type=str, default=None)
A__ : int= parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 20 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : List[str]= logging.get_logger(__name__)
class __lowerCamelCase ( _a ):
a : Optional[int] ="""timm_backbone"""
    def __init__( self , backbone=None , num_channels=3 , features_only=True , use_pretrained_backbone=True , out_indices=None , **kwargs , ) -> Dict:
        super().__init__(**kwargs )
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
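        # (-1,) selects only the backbone's last feature map when the caller
        # does not request specific stages via `out_indices`.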
| 20 | 1 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
A__ : Dict= logging.get_logger(__name__)
A__ : str= {"""vocab_file""": """spiece.model"""}
A__ : Union[str, Any]= {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
}
}
# TODO(PVP) - this should be removed in Transformers v5
A__ : Union[str, Any]= {
"""t5-small""": 5_12,
"""t5-base""": 5_12,
"""t5-large""": 5_12,
"""t5-3b""": 5_12,
"""t5-11b""": 5_12,
}
A__ : Optional[Any]= """▁"""
class __lowerCamelCase ( _a ):
a : Dict =VOCAB_FILES_NAMES
a : str =PRETRAINED_VOCAB_FILES_MAP
a : Union[str, Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : List[str] =["""input_ids""", """attention_mask"""]
def __init__( self , snake_case_ , snake_case_="</s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_=100 , snake_case_=None , snake_case_ = None , snake_case_=True , **snake_case_ , ) -> None:
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
UpperCamelCase__ = [F'<extra_id_{i}>' for i in range(snake_case_ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
UpperCamelCase__ = len(set(filter(lambda snake_case_ : bool('extra_id' in str(snake_case_ ) ) , snake_case_ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
' tokens' )
if legacy:
logger.warning_once(
F'You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'
' read the related pull request available at https://github.com/huggingface/transformers/pull/24565' )
UpperCamelCase__ = legacy
UpperCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , extra_ids=snake_case_ , additional_special_tokens=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , legacy=snake_case_ , **snake_case_ , )
UpperCamelCase__ = vocab_file
UpperCamelCase__ = extra_ids
UpperCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case_ )
@staticmethod
    def SCREAMING_SNAKE_CASE__ ( pretrained_model_name_or_path , max_model_length , init_max_model_length ) -> Optional[Any]:
        if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'This tokenizer was incorrectly instantiated with a model max length of'
F' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
F' {pretrained_model_name_or_path} automatically truncating your input to'
F' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
F' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
                ' instantiate this tokenizer with `model_max_length` set to your preferred value.' , FutureWarning , )
return max_model_length
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
return self.sp_model.get_piece_size() + self._extra_ids
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
    def SCREAMING_SNAKE_CASE__ ( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0 )) + [1]
        return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
    def get_sentinel_tokens( self ) -> List[Any]:
return list(
set(filter(lambda snake_case_ : bool(re.search(r'<extra_id_\d+>' , snake_case_ ) ) is not None , self.additional_special_tokens ) ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
        return [self._convert_token_to_id(token ) for token in self.get_sentinel_tokens()]
    def _add_eos_if_not_present( self , token_ids ) -> List[int]:
        if len(token_ids ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
' eos tokens being added.' )
return token_ids
else:
return token_ids + [self.eos_token_id]
    def SCREAMING_SNAKE_CASE__ ( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos ) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos ) * [0]
    def SCREAMING_SNAKE_CASE__ ( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0 )
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1 )
            return token_ids_0 + token_ids_1
def __getstate__( self ) -> str:
UpperCamelCase__ = self.__dict__.copy()
UpperCamelCase__ = None
return state
def __setstate__( self , snake_case_ ) -> Any:
UpperCamelCase__ = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
UpperCamelCase__ = {}
UpperCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , **snake_case_ ) -> List[str]:
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
UpperCamelCase__ = SPIECE_UNDERLINE + text.replace(snake_case_ , ' ' )
return super().tokenize(snake_case_ , **snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , **snake_case_ ) -> List[Any]:
if not self.legacy:
UpperCamelCase__ = text.startswith(snake_case_ )
if is_first:
UpperCamelCase__ = text[1:]
UpperCamelCase__ = self.sp_model.encode(snake_case_ , out_type=snake_case_ )
if not self.legacy and not is_first and not text.startswith(' ' ) and tokens[0].startswith(snake_case_ ):
UpperCamelCase__ = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
    def _convert_token_to_id( self , token ) -> List[str]:
        if token.startswith('<extra_id_' ):
            match = re.match(r'<extra_id_(\d+)>' , token )
            num = int(match.group(1 ) )
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Optional[int]:
if index < self.sp_model.get_piece_size():
UpperCamelCase__ = self.sp_model.IdToPiece(snake_case_ )
else:
UpperCamelCase__ = F'<extra_id_{self.vocab_size - 1 - index}>'
return token
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> List[str]:
UpperCamelCase__ = []
UpperCamelCase__ = ''
UpperCamelCase__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(snake_case_ ) + token
UpperCamelCase__ = True
UpperCamelCase__ = []
else:
current_sub_tokens.append(snake_case_ )
UpperCamelCase__ = False
out_string += self.sp_model.decode(snake_case_ )
return out_string.strip()
    def SCREAMING_SNAKE_CASE__ ( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , 'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
| 20 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
A__ : Any= logging.get_logger(__name__)
A__ : str= {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class __lowerCamelCase ( _a ):
a : List[str] ="""layoutlmv3"""
def __init__( self , snake_case_=5_0265 , snake_case_=768 , snake_case_=12 , snake_case_=12 , snake_case_=3072 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=2 , snake_case_=0.02 , snake_case_=1E-5 , snake_case_=1 , snake_case_=0 , snake_case_=2 , snake_case_=1024 , snake_case_=128 , snake_case_=128 , snake_case_=True , snake_case_=32 , snake_case_=128 , snake_case_=64 , snake_case_=256 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=224 , snake_case_=3 , snake_case_=16 , snake_case_=None , **snake_case_ , ) -> Union[str, Any]:
super().__init__(
vocab_size=snake_case_ , hidden_size=snake_case_ , num_hidden_layers=snake_case_ , num_attention_heads=snake_case_ , intermediate_size=snake_case_ , hidden_act=snake_case_ , hidden_dropout_prob=snake_case_ , attention_probs_dropout_prob=snake_case_ , max_position_embeddings=snake_case_ , type_vocab_size=snake_case_ , initializer_range=snake_case_ , layer_norm_eps=snake_case_ , pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_ , )
UpperCamelCase__ = max_ad_position_embeddings
UpperCamelCase__ = coordinate_size
UpperCamelCase__ = shape_size
UpperCamelCase__ = has_relative_attention_bias
UpperCamelCase__ = rel_pos_bins
UpperCamelCase__ = max_rel_pos
UpperCamelCase__ = has_spatial_attention_bias
UpperCamelCase__ = rel_ad_pos_bins
UpperCamelCase__ = max_rel_ad_pos
UpperCamelCase__ = text_embed
UpperCamelCase__ = visual_embed
UpperCamelCase__ = input_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = patch_size
UpperCamelCase__ = classifier_dropout
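# Editor's note: LayoutLM-family models expect word bounding boxes normalized to
# a 0-1000 grid, which is what the 1024-slot 2D position embedding configured
# above leaves room for. A minimal helper sketch; the function name and
# signature are ours, not part of this config class.
def normalize_bbox(bbox , page_width , page_height ):
    x0, y0, x1, y1 = bbox
    return [
        int(1000 * x0 / page_width ),
        int(1000 * y0 / page_height ),
        int(1000 * x1 / page_width ),
        int(1000 * y1 / page_height ),
    ]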
class __lowerCamelCase ( _a ):
a : Tuple =version.parse("""1.12""" )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Mapping[str, Mapping[int, str]]:
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
else:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels'}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> float:
return 1E-5
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
return 12
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = -1 , snake_case_ = -1 , snake_case_ = False , snake_case_ = None , snake_case_ = 3 , snake_case_ = 40 , snake_case_ = 40 , ) -> Mapping[str, Any]:
setattr(processor.image_processor , 'apply_ocr' , snake_case_ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCamelCase__ = compute_effective_axis_dimension(
snake_case_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCamelCase__ = processor.tokenizer.num_special_tokens_to_add(snake_case_ )
UpperCamelCase__ = compute_effective_axis_dimension(
snake_case_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=snake_case_ )
# Generate dummy inputs according to compute batch and sequence
UpperCamelCase__ = [[' '.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
UpperCamelCase__ = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
UpperCamelCase__ = self._generate_dummy_images(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
UpperCamelCase__ = dict(
processor(
snake_case_ , text=snake_case_ , boxes=snake_case_ , return_tensors=snake_case_ , ) )
return inputs
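# Editor's note: the axis-dimension helper used above, re-sketched for clarity.
# The behaviour mirrors transformers.onnx.utils.compute_effective_axis_dimension
# as it is called here; treat this as an illustration, not the canonical source.
def _effective_dim(dimension , fixed_dimension , num_token_to_add=0 ):
    if dimension <= 0:  # dynamic axis (-1) falls back to the fixed default
        dimension = fixed_dimension
    return dimension - num_token_to_add

assert _effective_dim(-1 , fixed_dimension=2 ) == 2
assert _effective_dim(-1 , fixed_dimension=8 , num_token_to_add=2 ) == 6
assert _effective_dim(16 , fixed_dimension=8 ) == 16  # static axes pass through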
| 20 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
A__ : int= {
"""configuration_swiftformer""": [
"""SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SwiftFormerConfig""",
"""SwiftFormerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Tuple= [
"""SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwiftFormerForImageClassification""",
"""SwiftFormerModel""",
"""SwiftFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
A__ : Any= _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 20 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class __lowerCamelCase :
def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=2 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=3 , snake_case_=4 , snake_case_=None , ) -> Tuple:
UpperCamelCase__ = parent
UpperCamelCase__ = 13
UpperCamelCase__ = 7
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = 99
UpperCamelCase__ = 384
UpperCamelCase__ = 2
UpperCamelCase__ = 4
UpperCamelCase__ = 37
UpperCamelCase__ = 'gelu'
UpperCamelCase__ = 0.1
UpperCamelCase__ = 0.1
UpperCamelCase__ = 512
UpperCamelCase__ = 16
UpperCamelCase__ = 2
UpperCamelCase__ = 0.02
UpperCamelCase__ = 3
UpperCamelCase__ = 4
UpperCamelCase__ = 128
UpperCamelCase__ = 2
UpperCamelCase__ = 9
UpperCamelCase__ = 1
UpperCamelCase__ = None
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ = None
if self.use_input_mask:
UpperCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ = None
if self.use_token_type_ids:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase__ = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=snake_case_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int:
UpperCamelCase__ = TFConvBertModel(config=snake_case_ )
UpperCamelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCamelCase__ = [input_ids, input_mask]
UpperCamelCase__ = model(snake_case_ )
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int:
UpperCamelCase__ = TFConvBertForMaskedLM(config=snake_case_ )
UpperCamelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[str]:
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = TFConvBertForSequenceClassification(config=snake_case_ )
UpperCamelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Tuple:
UpperCamelCase__ = self.num_choices
UpperCamelCase__ = TFConvBertForMultipleChoice(config=snake_case_ )
UpperCamelCase__ = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[str]:
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = TFConvBertForTokenClassification(config=snake_case_ )
UpperCamelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[Any]:
UpperCamelCase__ = TFConvBertForQuestionAnswering(config=snake_case_ )
UpperCamelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
UpperCamelCase__ = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
UpperCamelCase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class __lowerCamelCase ( _a , _a , unittest.TestCase ):
a : Any =(
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
a : str =(
{
"""feature-extraction""": TFConvBertModel,
"""fill-mask""": TFConvBertForMaskedLM,
"""question-answering""": TFConvBertForQuestionAnswering,
"""text-classification""": TFConvBertForSequenceClassification,
"""token-classification""": TFConvBertForTokenClassification,
"""zero-shot""": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
a : Any =False
a : Dict =False
a : str =False
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = TFConvBertModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=snake_case_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = True
UpperCamelCase__ = True
if hasattr(snake_case_ , 'use_cache' ):
UpperCamelCase__ = True
UpperCamelCase__ = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
UpperCamelCase__ = getattr(self.model_tester , 'key_length' , snake_case_ )
for model_class in self.all_model_classes:
UpperCamelCase__ = self._prepare_for_class(snake_case_ , snake_case_ )
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = len(model(snake_case_ ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case_ , saved_model=snake_case_ )
UpperCamelCase__ = os.path.join(snake_case_ , 'saved_model' , '1' )
UpperCamelCase__ = tf.keras.models.load_model(snake_case_ )
UpperCamelCase__ = model(snake_case_ )
if self.is_encoder_decoder:
UpperCamelCase__ = outputs['encoder_hidden_states']
UpperCamelCase__ = outputs['encoder_attentions']
else:
UpperCamelCase__ = outputs['hidden_states']
UpperCamelCase__ = outputs['attentions']
self.assertEqual(len(snake_case_ ) , snake_case_ )
UpperCamelCase__ = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(snake_case_ ) , snake_case_ )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = True
UpperCamelCase__ = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
UpperCamelCase__ = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
UpperCamelCase__ = getattr(self.model_tester , 'key_length' , snake_case_ )
UpperCamelCase__ = getattr(self.model_tester , 'key_length' , snake_case_ )
def check_decoder_attentions_output(snake_case_ ):
UpperCamelCase__ = len(snake_case_ )
self.assertEqual(out_len % 2 , 0 )
UpperCamelCase__ = outputs.decoder_attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(snake_case_ ):
UpperCamelCase__ = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
UpperCamelCase__ = True
UpperCamelCase__ = False
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
UpperCamelCase__ = len(snake_case_ )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
if self.is_encoder_decoder:
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_decoder_attentions_output(snake_case_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
UpperCamelCase__ = True
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
# Check attention is always last and order is fine
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(snake_case_ ) )
self.assertEqual(model.config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
@require_tf
class __lowerCamelCase ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
UpperCamelCase__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCamelCase__ = model(snake_case_ )[0]
UpperCamelCase__ = [1, 6, 768]
self.assertEqual(output.shape , snake_case_ )
UpperCamelCase__ = tf.constant(
[
[
[-0.03_475_493, -0.4_686_034, -0.30_638_832],
[0.22_637_248, -0.26_988_646, -0.7_423_424],
[0.10_324_868, -0.45_013_508, -0.58_280_784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , snake_case_ , atol=1E-4 )
| 20 | 1 |
"""simple docstring"""
from __future__ import annotations
import queue
class TreeNode:
    def __init__( self , data ) -> None:
        self.data = data
        self.left = None
        self.right = None
def build_tree() -> TreeNode:
    """simple docstring"""
    print('\n********Press N to stop entering at any point of time********\n' )
    check = input('Enter the value of the root node: ' ).strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check ) )
    q.put(tree_node )
    while not q.empty():
        node_found = q.get()
        msg = F'Enter the left node of {node_found.data}: '
        check = input(msg ).strip().lower() or 'n'
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check ) )
        node_found.left = left_node
        q.put(left_node )
        msg = F'Enter the right node of {node_found.data}: '
        check = input(msg ).strip().lower() or 'n'
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check ) )
        node_found.right = right_node
        q.put(right_node )
raise
def pre_order( node ) -> None:
    """simple docstring"""
    if not isinstance(node , TreeNode ) or not node:
return
print(node.data , end=',' )
pre_order(node.left )
pre_order(node.right )
def in_order( node ) -> None:
    """simple docstring"""
    if not isinstance(node , TreeNode ) or not node:
return
in_order(node.left )
print(node.data , end=',' )
in_order(node.right )
def post_order( node ) -> None:
    """simple docstring"""
    if not isinstance(node , TreeNode ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=',' )
def level_order( node ) -> None:
    """simple docstring"""
    if not isinstance(node , TreeNode ) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node )
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data , end=',' )
        if node_dequeued.left:
            q.put(node_dequeued.left )
        if node_dequeued.right:
            q.put(node_dequeued.right )
def level_order_actual( node ) -> None:
    """simple docstring"""
    if not isinstance(node , TreeNode ) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node )
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data , end=',' )
            if node_dequeued.left:
                list_.append(node_dequeued.left )
            if node_dequeued.right:
                list_.append(node_dequeued.right )
        print()
        for node in list_:
            q.put(node )
def pre_order_iter( node ) -> None:
    """simple docstring"""
    if not isinstance(node , TreeNode ) or not node:
        return
    stack = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data , end=',' )
            stack.append(n )
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def in_order_iter( node ) -> None:
    """simple docstring"""
    if not isinstance(node , TreeNode ) or not node:
        return
    stack = []
    n = node
    while n or stack:
        while n:
            stack.append(n )
            n = n.left
        n = stack.pop()
        print(n.data , end=',' )
        n = n.right
def post_order_iter( node ) -> None:
    """simple docstring"""
    if not isinstance(node , TreeNode ) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n )
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left )
        if n.right:
            stack1.append(n.right )
        stack2.append(n )
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data , end=',' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE = "" , SCREAMING_SNAKE_CASE=50 , SCREAMING_SNAKE_CASE="*" ) -> str:
"""simple docstring"""
if not s:
return "\n" + width * char
UpperCamelCase__ , UpperCamelCase__ = divmod(width - len(SCREAMING_SNAKE_CASE ) - 2 , 2 )
return F'{left * char} {s} {(left + extra) * char}'
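# Editor's note: a non-interactive sketch for exercising the traversals above
# without build_tree()'s input() prompts; builds the tree
#        1
#       / \
#      2   3
# so pre_order prints 1,2,3, and in_order prints 2,1,3,.
def _demo_tree() -> TreeNode:
    root = TreeNode(1 )
    root.left = TreeNode(2 )
    root.right = TreeNode(3 )
    return root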
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("""Binary Tree Traversals"""))
A__ : TreeNode= build_tree()
print(prompt("""Pre Order Traversal"""))
pre_order(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal"""))
in_order(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal"""))
post_order(node)
print(prompt() + """\n""")
print(prompt("""Level Order Traversal"""))
level_order(node)
print(prompt() + """\n""")
print(prompt("""Actual Level Order Traversal"""))
level_order_actual(node)
print("""*""" * 50 + """\n""")
print(prompt("""Pre Order Traversal - Iteration Version"""))
pre_order_iter(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal - Iteration Version"""))
in_order_iter(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal - Iteration Version"""))
post_order_iter(node)
print(prompt())
| 20 |
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_00_00_00 , n_limit: int = 10 ) -> int:
    """simple docstring"""
    count = defaultdict(int )
    for outer_width in range(3 , (t_limit // 4) + 2 ):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound , outer_width - 1 , 2 ):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit )
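# Editor's note: a small worked check of the lamina arithmetic used above. A
# lamina with outer width o and hole width h uses o*o - h*h tiles, so the
# smallest lamina (o=3, h=1) uses 8 tiles.
assert 3 * 3 - 1 * 1 == 8
assert solution(t_limit=8 , n_limit=10 ) == 1  # only the 8-tile type exists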
if __name__ == "__main__":
print(F"""{solution() = }""")
| 20 | 1 |
"""simple docstring"""
def is_arithmetic_series(series: list ) -> bool:
    """simple docstring"""
    if not isinstance(series , list ):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]' )
    if len(series ) == 0:
        raise ValueError('Input list must be a non empty list' )
    if len(series ) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series ) - 1 ):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True
def arithmetic_mean(series: list ) -> float:
    """simple docstring"""
    if not isinstance(series , list ):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]' )
    if len(series ) == 0:
        raise ValueError('Input list must be a non empty list' )
    answer = 0
    for val in series:
        answer += val
    return answer / len(series )
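# Editor's note: quick usage sketch for the two helpers above.
assert is_arithmetic_series([2, 4, 6] ) is True   # common difference 2
assert is_arithmetic_series([2, 4, 7] ) is False
assert arithmetic_mean([2, 4, 6] ) == 4.0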
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class __lowerCamelCase ( unittest.TestCase ):
def __init__( self , snake_case_ , snake_case_=100 , snake_case_=13 , snake_case_=30 , snake_case_=2 , snake_case_=3 , snake_case_=True , snake_case_=True , snake_case_=32 , snake_case_=5 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=10 , snake_case_=0.02 , snake_case_=3 , ) -> Optional[int]:
UpperCamelCase__ = parent
UpperCamelCase__ = vocab_size
UpperCamelCase__ = batch_size
UpperCamelCase__ = image_size
UpperCamelCase__ = patch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = is_training
UpperCamelCase__ = use_labels
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = type_sequence_label_size
UpperCamelCase__ = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase__ = (image_size // patch_size) ** 2
UpperCamelCase__ = num_patches + 1
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ = BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case_ , initializer_range=self.initializer_range , )
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> List[Any]:
UpperCamelCase__ = FlaxBeitModel(config=snake_case_ )
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> List[Any]:
UpperCamelCase__ = FlaxBeitForMaskedImageModeling(config=snake_case_ )
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> List[str]:
UpperCamelCase__ = self.type_sequence_label_size
UpperCamelCase__ = FlaxBeitForImageClassification(config=snake_case_ )
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase__ = 1
UpperCamelCase__ = FlaxBeitForImageClassification(snake_case_ )
UpperCamelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase__ = model(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
UpperCamelCase__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class __lowerCamelCase ( _a , unittest.TestCase ):
a : int =(
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
def SCREAMING_SNAKE_CASE__ ( self ) -> None:
UpperCamelCase__ = FlaxBeitModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ = [*signature.parameters.keys()]
UpperCamelCase__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCamelCase__ = self._prepare_for_class(snake_case_ , snake_case_ )
UpperCamelCase__ = model_class(snake_case_ )
@jax.jit
def model_jitted(snake_case_ , **snake_case_ ):
return model(pixel_values=snake_case_ , **snake_case_ )
with self.subTest('JIT Enabled' ):
UpperCamelCase__ = model_jitted(**snake_case_ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
UpperCamelCase__ = model_jitted(**snake_case_ ).to_tuple()
self.assertEqual(len(snake_case_ ) , len(snake_case_ ) )
for jitted_output, output in zip(snake_case_ , snake_case_ ):
self.assertEqual(jitted_output.shape , output.shape )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
for model_class_name in self.all_model_classes:
UpperCamelCase__ = model_class_name.from_pretrained('microsoft/beit-base-patch16-224' )
UpperCamelCase__ = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(snake_case_ )
def lowerCAmelCase_( ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = FlaxBeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=snake_case_ , return_tensors='np' ).pixel_values
# prepare bool_masked_pos
UpperCamelCase__ = np.ones((1, 196) , dtype=snake_case_ )
# forward pass
UpperCamelCase__ = model(pixel_values=snake_case_ , bool_masked_pos=snake_case_ )
UpperCamelCase__ = outputs.logits
# verify the logits
UpperCamelCase__ = (1, 196, 8192)
self.assertEqual(logits.shape , snake_case_ )
UpperCamelCase__ = np.array(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , snake_case_ , atol=1E-2 ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=snake_case_ , return_tensors='np' )
# forward pass
UpperCamelCase__ = model(**snake_case_ )
UpperCamelCase__ = outputs.logits
# verify the logits
UpperCamelCase__ = (1, 1000)
self.assertEqual(logits.shape , snake_case_ )
UpperCamelCase__ = np.array([-1.2_385, -1.0_987, -1.0_108] )
self.assertTrue(np.allclose(logits[0, :3] , snake_case_ , atol=1E-4 ) )
UpperCamelCase__ = 281
self.assertEqual(logits.argmax(-1 ).item() , snake_case_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=snake_case_ , return_tensors='np' )
# forward pass
UpperCamelCase__ = model(**snake_case_ )
UpperCamelCase__ = outputs.logits
# verify the logits
UpperCamelCase__ = (1, 2_1841)
self.assertEqual(logits.shape , snake_case_ )
UpperCamelCase__ = np.array([1.6_881, -0.2_787, 0.5_901] )
self.assertTrue(np.allclose(logits[0, :3] , snake_case_ , atol=1E-4 ) )
UpperCamelCase__ = 2396
self.assertEqual(logits.argmax(-1 ).item() , snake_case_ )
| 20 | 1 |
"""simple docstring"""
from collections import Counter
from timeit import timeit
def lowerCAmelCase_( SCREAMING_SNAKE_CASE = "" , ) -> bool:
"""simple docstring"""
return sum(c % 2 for c in Counter(input_str.replace(' ' , '' ).lower() ).values() ) < 2
def lowerCAmelCase_( SCREAMING_SNAKE_CASE = "" ) -> bool:
"""simple docstring"""
if len(SCREAMING_SNAKE_CASE ) == 0:
return True
UpperCamelCase__ = input_str.replace(' ' , '' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
UpperCamelCase__ = {}
for character in lower_case_input_str:
UpperCamelCase__ = character_freq_dict.get(SCREAMING_SNAKE_CASE , 0 ) + 1
UpperCamelCase__ = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
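# Editor's note: quick sanity checks for the two predicates above.
assert can_string_be_rearranged_as_palindrome_counter('momo' ) is True
assert can_string_be_rearranged_as_palindrome('abc' ) is False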
def lowerCAmelCase_( SCREAMING_SNAKE_CASE = "" ) -> None:
"""simple docstring"""
print('\nFor string = ' , SCREAMING_SNAKE_CASE , ':' )
print(
'> can_string_be_rearranged_as_palindrome_counter()' , '\tans =' , can_string_be_rearranged_as_palindrome_counter(SCREAMING_SNAKE_CASE ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome_counter(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
print(
'> can_string_be_rearranged_as_palindrome()' , '\tans =' , can_string_be_rearranged_as_palindrome(SCREAMING_SNAKE_CASE ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
if __name__ == "__main__":
A__ : List[str]= input(
"""Enter string to determine if it can be rearranged as a palindrome or not: """
).strip()
benchmark(check_str)
A__ : Dict= can_string_be_rearranged_as_palindrome_counter(check_str)
print(F"""{check_str} can {'' if status else 'not '}be rearranged as a palindrome""")
| 20 |
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
    def __init__( self ) -> None:
        self.node_position = []
    def get_position( self , vertex ) -> int:
        return self.node_position[vertex]
    def set_position( self , vertex , pos ) -> None:
        self.node_position[vertex] = pos
    def top_to_bottom( self , heap , start , size , positions ) -> None:
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, tempa
                temp = self.get_position(positions[smallest_child] )
                self.set_position(
                    positions[smallest_child] , self.get_position(positions[start] ) )
                self.set_position(positions[start] , temp )
                self.top_to_bottom(heap , smallest_child , size , positions )
    def bottom_to_top( self , val , index , heap , position ) -> None:
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent] , index )
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp , index )
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp , 0 )
    def heapify( self , heap , positions ) -> None:
        start = len(heap ) // 2 - 1
        for i in range(start , -1 , -1 ):
            self.top_to_bottom(heap , i , len(heap ) , positions )
    def delete_minimum( self , heap , positions ):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap , 0 , len(heap ) , positions )
        return temp
def prisms_algorithm(adjacency_list ):
    """simple docstring"""
    heap = Heap()
    visited = [0] * len(adjacency_list )
    nbr_tv = [-1] * len(adjacency_list )  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list ) ):
        distance_tv.append(sys.maxsize )
        positions.append(vertex )
        heap.node_position.append(vertex )
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv , positions )
    for _ in range(1 , len(adjacency_list ) ):
        vertex = heap.delete_minimum(distance_tv , positions )
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex) )
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor )]
                ):
                    distance_tv[heap.get_position(neighbor )] = distance
                    heap.bottom_to_top(
                        distance , heap.get_position(neighbor ) , distance_tv , positions )
                    nbr_tv[neighbor] = vertex
    return tree_edges
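# Editor's note: a small non-interactive check of the MST construction above,
# on a hypothetical 4-vertex graph (each entry is [neighbor, edge_weight]).
_demo_graph = {
    0: [[1, 1], [3, 3]],
    1: [[0, 1], [2, 6]],
    2: [[1, 6], [3, 2]],
    3: [[0, 3], [2, 2]],
}
assert prisms_algorithm(_demo_graph ) == [(0, 1), (0, 3), (3, 2)]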
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
A__ : Dict= int(input("""Enter number of edges: """).strip())
A__ : Dict= defaultdict(list)
for _ in range(edges_number):
A__ : Dict= [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 20 | 1 |
"""simple docstring"""
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
A__ : Union[str, Any]= datasets.utils.logging.get_logger(__name__)
@dataclass
class __lowerCamelCase ( datasets.BuilderConfig ):
a : Optional[datasets.Features] =None
a : str ="utf-8"
a : Optional[str] =None
a : Optional[str] =None
a : bool =True # deprecated
a : Optional[int] =None # deprecated
a : int =1_0 << 2_0 # 10MB
a : Optional[bool] =None
class __lowerCamelCase ( datasets.ArrowBasedBuilder ):
a : Any =JsonConfig
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
if self.config.block_size is not None:
logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead' )
UpperCamelCase__ = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.' )
if self.config.newlines_in_values is not None:
raise ValueError('The JSON loader parameter `newlines_in_values` is no longer supported' )
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Dict:
if not self.config.data_files:
raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' )
UpperCamelCase__ = dl_manager.download_and_extract(self.config.data_files )
if isinstance(snake_case_ , (str, list, tuple) ):
UpperCamelCase__ = data_files
if isinstance(snake_case_ , snake_case_ ):
UpperCamelCase__ = [files]
UpperCamelCase__ = [dl_manager.iter_files(snake_case_ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
UpperCamelCase__ = []
for split_name, files in data_files.items():
if isinstance(snake_case_ , snake_case_ ):
UpperCamelCase__ = [files]
UpperCamelCase__ = [dl_manager.iter_files(snake_case_ ) for file in files]
splits.append(datasets.SplitGenerator(name=snake_case_ , gen_kwargs={'files': files} ) )
return splits
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> pa.Table:
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
UpperCamelCase__ = self.config.features.arrow_schema.field(snake_case_ ).type
UpperCamelCase__ = pa_table.append_column(snake_case_ , pa.array([None] * len(snake_case_ ) , type=snake_case_ ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
UpperCamelCase__ = table_cast(snake_case_ , self.config.features.arrow_schema )
return pa_table
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> str:
for file_idx, file in enumerate(itertools.chain.from_iterable(snake_case_ ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(snake_case_ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
UpperCamelCase__ = json.load(snake_case_ )
# We keep only the field we are interested in
UpperCamelCase__ = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(snake_case_ , (list, tuple) ):
UpperCamelCase__ = set().union(*[row.keys() for row in dataset] )
UpperCamelCase__ = {col: [row.get(snake_case_ ) for row in dataset] for col in keys}
else:
UpperCamelCase__ = dataset
UpperCamelCase__ = pa.Table.from_pydict(snake_case_ )
yield file_idx, self._cast_table(snake_case_ )
# If the file has one json object per line
else:
with open(snake_case_ , 'rb' ) as f:
UpperCamelCase__ = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
UpperCamelCase__ = max(self.config.chunksize // 32 , 16 << 10 )
UpperCamelCase__ = (
self.config.encoding_errors if self.config.encoding_errors is not None else 'strict'
)
while True:
UpperCamelCase__ = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(snake_case_ )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
UpperCamelCase__ = batch.decode(self.config.encoding , errors=snake_case_ ).encode('utf-8' )
try:
while True:
try:
UpperCamelCase__ = paj.read_json(
io.BytesIO(snake_case_ ) , read_options=paj.ReadOptions(block_size=snake_case_ ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(snake_case_ , pa.ArrowInvalid )
and "straddling" not in str(snake_case_ )
or block_size > len(snake_case_ )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F'Batch of {len(snake_case_ )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.' )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
snake_case_ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
UpperCamelCase__ = json.load(snake_case_ )
except json.JSONDecodeError:
logger.error(F'Failed to read file \'{file}\' with error {type(snake_case_ )}: {e}' )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(snake_case_ , snake_case_ ): # list is the only sequence type supported in JSON
try:
UpperCamelCase__ = set().union(*[row.keys() for row in dataset] )
UpperCamelCase__ = {col: [row.get(snake_case_ ) for row in dataset] for col in keys}
UpperCamelCase__ = pa.Table.from_pydict(snake_case_ )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F'Failed to read file \'{file}\' with error {type(snake_case_ )}: {e}' )
raise ValueError(F'Not able to read records in the JSON file at {file}.' ) from None
yield file_idx, self._cast_table(snake_case_ )
break
else:
logger.error(F'Failed to read file \'{file}\' with error {type(snake_case_ )}: {e}' )
raise ValueError(
F'Not able to read records in the JSON file at {file}. '
F'You should probably indicate the field of the JSON file containing your records. '
F'This JSON file contain the following fields: {str(list(dataset.keys() ) )}. '
F'Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ' ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(snake_case_ )
batch_idx += 1
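# Editor's note: the row->column pivot used twice above (list of dicts to dict
# of lists, tolerating missing keys), extracted as a standalone sketch.
def _rows_to_columns(dataset ):
    keys = set().union(*[row.keys() for row in dataset] )
    return {col: [row.get(col ) for row in dataset] for col in keys}

assert _rows_to_columns([{'a': 1}, {'a': 2, 'b': 3}] ) == {'a': [1, 2], 'b': [None, 3]}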
| 20 |
"""simple docstring"""
from copy import deepcopy
class FenwickTree:
    def __init__( self , arr = None , size = None ) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr )
        else:
            raise ValueError('Either arr or size must be specified' )
    def init( self , arr ) -> None:
        self.size = len(arr )
        self.tree = deepcopy(arr )
        for i in range(1 , self.size ):
            j = self.next_(i )
            if j < self.size:
                self.tree[j] += self.tree[i]
    def get_array( self ) -> list:
        arr = self.tree[:]
        for i in range(self.size - 1 , 0 , -1 ):
            j = self.next_(i )
            if j < self.size:
                arr[j] -= arr[i]
        return arr
    @staticmethod
    def next_( index ) -> int:
        return index + (index & (-index))
    @staticmethod
    def prev( index ) -> int:
        return index - (index & (-index))
    def add( self , index , value ) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index )
    def update( self , index , value ) -> None:
        self.add(index , value - self.get(index ) )
    def prefix( self , right ) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right )
        return result
    def query( self , left , right ) -> int:
        return self.prefix(right ) - self.prefix(left )
    def get( self , index ) -> int:
        return self.query(index , index + 1 )
    def rank_query( self , value ) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
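# Editor's note: a short usage sketch for the Fenwick (binary indexed) tree above.
_f = FenwickTree(arr=[1, 2, 0, 3, 0, 5] )
_f.add(2 , 4 )                          # point update: arr[2] += 4
assert _f.get_array() == [1, 2, 4, 3, 0, 5]
assert _f.prefix(3 ) == 1 + 2 + 4       # sum of arr[0:3]
assert _f.query(1 , 4 ) == 2 + 4 + 3    # half-open range sum over arr[1:4]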
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 | 1 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class __lowerCamelCase ( _a ):
a : jnp.ndarray
@flax_register_to_config
class __lowerCamelCase ( nn.Module , _a , _a ):
a : int =3_2
a : int =4
a : int =4
a : Tuple[str] =(
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
a : Tuple[str] =("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
a : Union[bool, Tuple[bool]] =False
a : Tuple[int] =(3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
a : int =2
a : Union[int, Tuple[int]] =8
a : Optional[Union[int, Tuple[int]]] =None
a : int =1_2_8_0
a : float =0.0
a : bool =False
a : jnp.dtype =jnp.floataa
a : bool =True
a : int =0
a : bool =False
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> FrozenDict:
# init input tensors
UpperCamelCase__ = (1, self.in_channels, self.sample_size, self.sample_size)
UpperCamelCase__ = jnp.zeros(snake_case_ , dtype=jnp.floataa )
UpperCamelCase__ = jnp.ones((1,) , dtype=jnp.intaa )
UpperCamelCase__ = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
        params_rng , dropout_rng = jax.random.split(snake_case_ )
UpperCamelCase__ = {'params': params_rng, 'dropout': dropout_rng}
return self.init(snake_case_ , snake_case_ , snake_case_ , snake_case_ )["params"]
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ = self.block_out_channels
UpperCamelCase__ = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.' )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
UpperCamelCase__ = self.num_attention_heads or self.attention_head_dim
# input
UpperCamelCase__ = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
UpperCamelCase__ = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
UpperCamelCase__ = FlaxTimestepEmbedding(snake_case_ , dtype=self.dtype )
UpperCamelCase__ = self.only_cross_attention
if isinstance(snake_case_ , snake_case_ ):
UpperCamelCase__ = (only_cross_attention,) * len(self.down_block_types )
if isinstance(snake_case_ , snake_case_ ):
UpperCamelCase__ = (num_attention_heads,) * len(self.down_block_types )
# down
UpperCamelCase__ = []
UpperCamelCase__ = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
UpperCamelCase__ = output_channel
UpperCamelCase__ = block_out_channels[i]
UpperCamelCase__ = i == len(snake_case_ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
UpperCamelCase__ = FlaxCrossAttnDownBlockaD(
in_channels=snake_case_ , out_channels=snake_case_ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
UpperCamelCase__ = FlaxDownBlockaD(
in_channels=snake_case_ , out_channels=snake_case_ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(snake_case_ )
UpperCamelCase__ = down_blocks
# mid
UpperCamelCase__ = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
UpperCamelCase__ = []
UpperCamelCase__ = list(reversed(snake_case_ ) )
UpperCamelCase__ = list(reversed(snake_case_ ) )
UpperCamelCase__ = list(reversed(snake_case_ ) )
UpperCamelCase__ = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
UpperCamelCase__ = output_channel
UpperCamelCase__ = reversed_block_out_channels[i]
UpperCamelCase__ = reversed_block_out_channels[min(i + 1 , len(snake_case_ ) - 1 )]
UpperCamelCase__ = i == len(snake_case_ ) - 1
if up_block_type == "CrossAttnUpBlock2D":
UpperCamelCase__ = FlaxCrossAttnUpBlockaD(
in_channels=snake_case_ , out_channels=snake_case_ , prev_output_channel=snake_case_ , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
UpperCamelCase__ = FlaxUpBlockaD(
in_channels=snake_case_ , out_channels=snake_case_ , prev_output_channel=snake_case_ , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(snake_case_ )
UpperCamelCase__ = output_channel
UpperCamelCase__ = up_blocks
# out
UpperCamelCase__ = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
UpperCamelCase__ = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=None , snake_case_=None , snake_case_ = True , snake_case_ = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]:
# 1. time
if not isinstance(snake_case_ , jnp.ndarray ):
UpperCamelCase__ = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(snake_case_ , jnp.ndarray ) and len(timesteps.shape ) == 0:
UpperCamelCase__ = timesteps.astype(dtype=jnp.floataa )
UpperCamelCase__ = jnp.expand_dims(snake_case_ , 0 )
UpperCamelCase__ = self.time_proj(snake_case_ )
UpperCamelCase__ = self.time_embedding(snake_case_ )
# 2. pre-process
UpperCamelCase__ = jnp.transpose(snake_case_ , (0, 2, 3, 1) )
UpperCamelCase__ = self.conv_in(snake_case_ )
# 3. down
UpperCamelCase__ = (sample,)
for down_block in self.down_blocks:
if isinstance(snake_case_ , snake_case_ ):
UpperCamelCase__ , UpperCamelCase__ = down_block(snake_case_ , snake_case_ , snake_case_ , deterministic=not train )
else:
UpperCamelCase__ , UpperCamelCase__ = down_block(snake_case_ , snake_case_ , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
UpperCamelCase__ = ()
for down_block_res_sample, down_block_additional_residual in zip(
snake_case_ , snake_case_ ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
UpperCamelCase__ = new_down_block_res_samples
# 4. mid
UpperCamelCase__ = self.mid_block(snake_case_ , snake_case_ , snake_case_ , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
UpperCamelCase__ = down_block_res_samples[-(self.layers_per_block + 1) :]
UpperCamelCase__ = down_block_res_samples[: -(self.layers_per_block + 1)]
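            # each up block pops the (layers_per_block + 1) most recent residuals
            # pushed by its mirrored down block, wiring up the U-Net skip connections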
if isinstance(snake_case_ , snake_case_ ):
UpperCamelCase__ = up_block(
snake_case_ , temb=snake_case_ , encoder_hidden_states=snake_case_ , res_hidden_states_tuple=snake_case_ , deterministic=not train , )
else:
UpperCamelCase__ = up_block(snake_case_ , temb=snake_case_ , res_hidden_states_tuple=snake_case_ , deterministic=not train )
# 6. post-process
UpperCamelCase__ = self.conv_norm_out(snake_case_ )
UpperCamelCase__ = nn.silu(snake_case_ )
UpperCamelCase__ = self.conv_out(snake_case_ )
UpperCamelCase__ = jnp.transpose(snake_case_ , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=snake_case_ )
| 20 |
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
A__ : Union[str, Any]= logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""" )
@require_torch
@require_tf
@slow
class __lowerCamelCase ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = True , ) -> Tuple:
UpperCamelCase__ = [file for file in os.listdir(snake_case_ ) if os.path.isfile(os.path.join(snake_case_ , snake_case_ ) )]
if identifier is not None:
UpperCamelCase__ = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(snake_case_ , snake_case_ ):
for n_ in n_identifier:
UpperCamelCase__ = [file for file in files if n_ not in file]
else:
UpperCamelCase__ = [file for file in files if n_identifier not in file]
UpperCamelCase__ = ignore_files or []
ignore_files.append('__init__.py' )
UpperCamelCase__ = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('Testing' , snake_case_ )
if only_modules:
UpperCamelCase__ = file.split('.' )[0]
try:
UpperCamelCase__ = getattr(snake_case_ , snake_case_ )
UpperCamelCase__ = doctest.DocTestSuite(snake_case_ )
UpperCamelCase__ = unittest.TextTestRunner().run(snake_case_ )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(F'{module_identifier} is not a module.' )
else:
UpperCamelCase__ = doctest.testfile(str('..' / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = Path('src/transformers' )
UpperCamelCase__ = 'modeling'
UpperCamelCase__ = [
'modeling_ctrl.py',
'modeling_tf_ctrl.py',
]
self.analyze_directory(snake_case_ , identifier=snake_case_ , ignore_files=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = Path('src/transformers' )
UpperCamelCase__ = 'tokenization'
self.analyze_directory(snake_case_ , identifier=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
UpperCamelCase__ = Path('src/transformers' )
UpperCamelCase__ = 'configuration'
self.analyze_directory(snake_case_ , identifier=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = Path('src/transformers' )
UpperCamelCase__ = ['configuration', 'modeling', 'tokenization']
self.analyze_directory(snake_case_ , n_identifier=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = Path('docs/source' )
UpperCamelCase__ = ['favicon.ico']
self.analyze_directory(snake_case_ , ignore_files=snake_case_ , only_modules=snake_case_ )
| 20 | 1 |
"""simple docstring"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> bool:
"""simple docstring"""
UpperCamelCase__ = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
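# worked check: P(4) = 4 * (3 * 4 - 1) // 2 = 22 and (1 + (1 + 24 * 22) ** 0.5) / 6
# = (1 + 23) / 6 = 4.0, an integer, so 22 passes the pentagonal test above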
def lowerCAmelCase_( SCREAMING_SNAKE_CASE = 50_00 ) -> int:
"""simple docstring"""
UpperCamelCase__ = [(i * (3 * i - 1)) // 2 for i in range(1 , SCREAMING_SNAKE_CASE )]
for i, pentagonal_i in enumerate(SCREAMING_SNAKE_CASE ):
for j in range(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) ):
UpperCamelCase__ = pentagonal_nums[j]
UpperCamelCase__ = pentagonal_i + pentagonal_j
UpperCamelCase__ = pentagonal_j - pentagonal_i
if is_pentagonal(SCREAMING_SNAKE_CASE ) and is_pentagonal(SCREAMING_SNAKE_CASE ):
return b
return -1
if __name__ == "__main__":
print(F"""{solution() = }""")
| 20 |
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ : str= logging.get_logger(__name__)
A__ : List[Any]= {
"""nvidia/segformer-b0-finetuned-ade-512-512""": (
"""https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"""
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class __lowerCamelCase ( _a ):
a : Any ="""segformer"""
def __init__( self , snake_case_=3 , snake_case_=4 , snake_case_=[2, 2, 2, 2] , snake_case_=[8, 4, 2, 1] , snake_case_=[32, 64, 160, 256] , snake_case_=[7, 3, 3, 3] , snake_case_=[4, 2, 2, 2] , snake_case_=[1, 2, 5, 8] , snake_case_=[4, 4, 4, 4] , snake_case_="gelu" , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.1 , snake_case_=0.02 , snake_case_=0.1 , snake_case_=1E-6 , snake_case_=256 , snake_case_=255 , **snake_case_ , ) -> Tuple:
super().__init__(**snake_case_ )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
' removed, as the behaviour will default to that of reshape_last_stage = True.' , snake_case_ , )
UpperCamelCase__ = num_channels
UpperCamelCase__ = num_encoder_blocks
UpperCamelCase__ = depths
UpperCamelCase__ = sr_ratios
UpperCamelCase__ = hidden_sizes
UpperCamelCase__ = patch_sizes
UpperCamelCase__ = strides
UpperCamelCase__ = mlp_ratios
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = classifier_dropout_prob
UpperCamelCase__ = initializer_range
UpperCamelCase__ = drop_path_rate
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = decoder_hidden_size
UpperCamelCase__ = kwargs.get('reshape_last_stage' , snake_case_ )
UpperCamelCase__ = semantic_loss_ignore_index
class __lowerCamelCase ( _a ):
a : Any =version.parse("""1.11""" )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> float:
return 1E-4
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
return 12
| 20 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class __lowerCamelCase ( unittest.TestCase ):
def __init__( self , snake_case_ , snake_case_=7 , snake_case_=3 , snake_case_=18 , snake_case_=30 , snake_case_=400 , snake_case_=True , snake_case_=32 , snake_case_=True , ) -> List[str]:
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = image_size
UpperCamelCase__ = min_resolution
UpperCamelCase__ = max_resolution
UpperCamelCase__ = do_resize
UpperCamelCase__ = size_divisor
UpperCamelCase__ = do_rescale
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class __lowerCamelCase ( _a , unittest.TestCase ):
a : List[str] =GLPNImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
UpperCamelCase__ = GLPNImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case_ , 'do_resize' ) )
self.assertTrue(hasattr(snake_case_ , 'size_divisor' ) )
self.assertTrue(hasattr(snake_case_ , 'resample' ) )
self.assertTrue(hasattr(snake_case_ , 'do_rescale' ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
pass
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
# Initialize image_processing
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
UpperCamelCase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
# Initialize image_processing
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ , numpify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
UpperCamelCase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
# Initialize image_processing
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ , torchify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
UpperCamelCase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
| 20 |
"""simple docstring"""
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class __lowerCamelCase ( _a ):
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Union[str, Any]:
UpperCamelCase__ = parser.add_parser('download' )
download_parser.add_argument(
'--cache-dir' , type=snake_case_ , default=snake_case_ , help='Path to location to store the models' )
download_parser.add_argument(
'--force' , action='store_true' , help='Force the model to be download even if already in cache-dir' )
download_parser.add_argument(
'--trust-remote-code' , action='store_true' , help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' , )
download_parser.add_argument('model' , type=snake_case_ , help='Name of the model to download' )
download_parser.set_defaults(func=snake_case_ )
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> str:
UpperCamelCase__ = model
UpperCamelCase__ = cache
UpperCamelCase__ = force
UpperCamelCase__ = trust_remote_code
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
| 20 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A__ : Optional[int]= {
"""configuration_blenderbot""": [
"""BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotConfig""",
"""BlenderbotOnnxConfig""",
],
"""tokenization_blenderbot""": ["""BlenderbotTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Optional[int]= ["""BlenderbotTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : str= [
"""BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotForCausalLM""",
"""BlenderbotForConditionalGeneration""",
"""BlenderbotModel""",
"""BlenderbotPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Optional[int]= [
"""TFBlenderbotForConditionalGeneration""",
"""TFBlenderbotModel""",
"""TFBlenderbotPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : List[Any]= [
"""FlaxBlenderbotForConditionalGeneration""",
"""FlaxBlenderbotModel""",
"""FlaxBlenderbotPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
A__ : Any= _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 20 |
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCamelCase ( _a ):
def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=5 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=False , snake_case_=True , snake_case_="None" , snake_case_=3 , snake_case_=4 , snake_case_=None , ) -> str:
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = seq_length
UpperCamelCase__ = is_training
UpperCamelCase__ = use_input_mask
UpperCamelCase__ = use_token_type_ids
UpperCamelCase__ = use_labels
UpperCamelCase__ = vocab_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = type_vocab_size
UpperCamelCase__ = type_sequence_label_size
UpperCamelCase__ = initializer_range
UpperCamelCase__ = num_labels
UpperCamelCase__ = num_choices
UpperCamelCase__ = relative_attention
UpperCamelCase__ = position_biased_input
UpperCamelCase__ = pos_att_type
UpperCamelCase__ = scope
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ = None
if self.use_input_mask:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
UpperCamelCase__ = None
if self.use_token_type_ids:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Any:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> str:
UpperCamelCase__ = DebertaVaModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ )[0]
UpperCamelCase__ = model(snake_case_ , token_type_ids=snake_case_ )[0]
UpperCamelCase__ = model(snake_case_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Optional[Any]:
UpperCamelCase__ = DebertaVaForMaskedLM(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Dict:
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = DebertaVaForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Optional[int]:
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = DebertaVaForTokenClassification(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Tuple:
UpperCamelCase__ = DebertaVaForQuestionAnswering(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int:
UpperCamelCase__ = DebertaVaForMultipleChoice(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = self.prepare_config_and_inputs()
        (
            UpperCamelCase__ ,
            UpperCamelCase__ ,
            UpperCamelCase__ ,
            UpperCamelCase__ ,
            UpperCamelCase__ ,
            UpperCamelCase__ ,
            UpperCamelCase__ ,
        ) = config_and_inputs
UpperCamelCase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( _a , _a , unittest.TestCase ):
a : Any =(
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
a : Dict =(
{
"""feature-extraction""": DebertaVaModel,
"""fill-mask""": DebertaVaForMaskedLM,
"""question-answering""": DebertaVaForQuestionAnswering,
"""text-classification""": DebertaVaForSequenceClassification,
"""token-classification""": DebertaVaForTokenClassification,
"""zero-shot""": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
a : Tuple =True
a : Union[str, Any] =False
a : Tuple =False
a : Union[str, Any] =False
a : Dict =False
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
UpperCamelCase__ = DebertaVaModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=snake_case_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*snake_case_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ = DebertaVaModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( unittest.TestCase ):
@unittest.skip(reason='Model not available yet' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
pass
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
UpperCamelCase__ = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge' )
UpperCamelCase__ = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
UpperCamelCase__ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ )[0]
# compare the actual values for a slice.
UpperCamelCase__ = torch.tensor(
[[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , snake_case_ , atol=1E-4 ) , F'{output[:, 1:4, 1:4]}' )
| 20 | 1 |
"""simple docstring"""
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
A__ : Optional[int]= argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument(
"""--original_config_file""",
type=str,
required=True,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--image_size""",
default=5_12,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(F'could not parse string as bool {string}' )
parser.add_argument(
"""--use_linear_projection""", help="""Override for use linear projection""", required=False, type=parse_bool
)
parser.add_argument("""--cross_attention_dim""", help="""Override for cross attention_dim""", required=False, type=int)
A__ : Tuple= parser.parse_args()
A__ : Optional[Any]= download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 20 |
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ = SwinConfig()
UpperCamelCase__ = swin_name.split('_' )
UpperCamelCase__ = name_split[1]
UpperCamelCase__ = int(name_split[4] )
UpperCamelCase__ = int(name_split[3][-1] )
if model_size == "tiny":
UpperCamelCase__ = 96
UpperCamelCase__ = (2, 2, 6, 2)
UpperCamelCase__ = (3, 6, 12, 24)
elif model_size == "small":
UpperCamelCase__ = 96
UpperCamelCase__ = (2, 2, 18, 2)
UpperCamelCase__ = (3, 6, 12, 24)
elif model_size == "base":
UpperCamelCase__ = 1_28
UpperCamelCase__ = (2, 2, 18, 2)
UpperCamelCase__ = (4, 8, 16, 32)
else:
UpperCamelCase__ = 1_92
UpperCamelCase__ = (2, 2, 18, 2)
UpperCamelCase__ = (6, 12, 24, 48)
if "in22k" in swin_name:
UpperCamelCase__ = 2_18_41
else:
UpperCamelCase__ = 10_00
UpperCamelCase__ = 'huggingface/label-files'
UpperCamelCase__ = 'imagenet-1k-id2label.json'
UpperCamelCase__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
UpperCamelCase__ = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
UpperCamelCase__ = idalabel
UpperCamelCase__ = {v: k for k, v in idalabel.items()}
UpperCamelCase__ = img_size
UpperCamelCase__ = num_classes
UpperCamelCase__ = embed_dim
UpperCamelCase__ = depths
UpperCamelCase__ = num_heads
UpperCamelCase__ = window_size
return config
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
if "patch_embed.proj" in name:
UpperCamelCase__ = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
UpperCamelCase__ = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
UpperCamelCase__ = 'encoder.' + name
if "attn.proj" in name:
UpperCamelCase__ = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
UpperCamelCase__ = name.replace('attn' , 'attention.self' )
if "norm1" in name:
UpperCamelCase__ = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
UpperCamelCase__ = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
UpperCamelCase__ = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
UpperCamelCase__ = name.replace('mlp.fc2' , 'output.dense' )
if name == "norm.weight":
UpperCamelCase__ = 'layernorm.weight'
if name == "norm.bias":
UpperCamelCase__ = 'layernorm.bias'
if "head" in name:
UpperCamelCase__ = name.replace('head' , 'classifier' )
else:
UpperCamelCase__ = 'swin.' + name
return name
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
UpperCamelCase__ = orig_state_dict.pop(SCREAMING_SNAKE_CASE )
if "mask" in key:
continue
elif "qkv" in key:
UpperCamelCase__ = key.split('.' )
UpperCamelCase__ = int(key_split[1] )
UpperCamelCase__ = int(key_split[3] )
UpperCamelCase__ = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
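            # timm stores query, key and value fused along dim 0 (3 * dim rows), so
            # the tensor is cut into thirds below, e.g. a (288, 96) qkv weight yields
            # three (96, 96) matrices when dim = 96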
if "weight" in key:
                UpperCamelCase__ = val[:dim, :]
                UpperCamelCase__ = val[dim : dim * 2, :]
                UpperCamelCase__ = val[-dim:, :]
            else:
                UpperCamelCase__ = val[:dim]
                UpperCamelCase__ = val[dim : dim * 2]
                UpperCamelCase__ = val[-dim:]
else:
UpperCamelCase__ = val
return orig_state_dict
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
UpperCamelCase__ = timm.create_model(SCREAMING_SNAKE_CASE , pretrained=SCREAMING_SNAKE_CASE )
timm_model.eval()
UpperCamelCase__ = get_swin_config(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = SwinForImageClassification(SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase__ = convert_state_dict(timm_model.state_dict() , SCREAMING_SNAKE_CASE )
model.load_state_dict(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
UpperCamelCase__ = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-' ) ) )
UpperCamelCase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
UpperCamelCase__ = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='pt' )
UpperCamelCase__ = timm_model(inputs['pixel_values'] )
UpperCamelCase__ = model(**SCREAMING_SNAKE_CASE ).logits
assert torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-3 )
print(F'Saving model {swin_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
A__ : Optional[Any]= argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
A__ : Tuple= parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 20 | 1 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> np.array:
"""simple docstring"""
UpperCamelCase__ = int(np.ceil((x_end - xa) / step_size ) )
UpperCamelCase__ = np.zeros((n + 1,) )
UpperCamelCase__ = ya
UpperCamelCase__ = xa
for k in range(SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = y[k] + step_size * ode_func(SCREAMING_SNAKE_CASE , y[k] )
UpperCamelCase__ = y[k] + (
(step_size / 2) * (ode_func(SCREAMING_SNAKE_CASE , y[k] ) + ode_func(x + step_size , SCREAMING_SNAKE_CASE ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
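    # minimal usage sketch (argument order assumed from the body: ode_func, y0,
    # x0, step_size, x_end); integrating y' = y from y(0) = 1 to x = 1 should
    # approach e ~= 2.718 as the step size shrinks:
    # ys = lowerCAmelCase_(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
    # print(ys[-1])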
| 20 |
"""simple docstring"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> bool:
"""simple docstring"""
UpperCamelCase__ = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def lowerCAmelCase_( SCREAMING_SNAKE_CASE = 50_00 ) -> int:
"""simple docstring"""
UpperCamelCase__ = [(i * (3 * i - 1)) // 2 for i in range(1 , SCREAMING_SNAKE_CASE )]
for i, pentagonal_i in enumerate(SCREAMING_SNAKE_CASE ):
for j in range(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) ):
UpperCamelCase__ = pentagonal_nums[j]
UpperCamelCase__ = pentagonal_i + pentagonal_j
UpperCamelCase__ = pentagonal_j - pentagonal_i
if is_pentagonal(SCREAMING_SNAKE_CASE ) and is_pentagonal(SCREAMING_SNAKE_CASE ):
return b
return -1
if __name__ == "__main__":
print(F"""{solution() = }""")
| 20 | 1 |
"""simple docstring"""
from collections.abc import Iterable
from typing import Any
class __lowerCamelCase :
def __init__( self , snake_case_ = None ) -> Tuple:
UpperCamelCase__ = value
UpperCamelCase__ = None # Added in order to delete a node easier
UpperCamelCase__ = None
UpperCamelCase__ = None
def __repr__( self ) -> str:
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({F'{self.value}': (self.left, self.right)} , indent=1 )
class __lowerCamelCase :
def __init__( self , snake_case_ = None ) -> List[str]:
UpperCamelCase__ = root
def __str__( self ) -> str:
return str(self.root )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> None:
if new_children is not None: # reset its kids
UpperCamelCase__ = node.parent
if node.parent is not None: # reset its parent
if self.is_right(snake_case_ ): # If it is the right children
UpperCamelCase__ = new_children
else:
UpperCamelCase__ = new_children
else:
UpperCamelCase__ = new_children
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> bool:
if node.parent and node.parent.right:
return node == node.parent.right
return False
def SCREAMING_SNAKE_CASE__ ( self ) -> bool:
return self.root is None
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> None:
UpperCamelCase__ = Node(snake_case_ ) # create a new Node
if self.empty(): # if Tree is empty
UpperCamelCase__ = new_node # set its root
else: # Tree is not empty
UpperCamelCase__ = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
UpperCamelCase__ = new_node # We insert the new node in a leaf
break
else:
UpperCamelCase__ = parent_node.left
else:
if parent_node.right is None:
UpperCamelCase__ = new_node
break
else:
UpperCamelCase__ = parent_node.right
UpperCamelCase__ = parent_node
def SCREAMING_SNAKE_CASE__ ( self , *snake_case_ ) -> None:
for value in values:
self.__insert(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Node | None:
if self.empty():
            raise IndexError('Warning: Tree is empty! Please use another.' )
else:
UpperCamelCase__ = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
UpperCamelCase__ = node.left if value < node.value else node.right
return node
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ = None ) -> Node | None:
if node is None:
if self.root is None:
return None
UpperCamelCase__ = self.root
if not self.empty():
while node.right is not None:
UpperCamelCase__ = node.right
return node
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ = None ) -> Node | None:
if node is None:
UpperCamelCase__ = self.root
if self.root is None:
return None
if not self.empty():
UpperCamelCase__ = self.root
while node.left is not None:
UpperCamelCase__ = node.left
return node
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> None:
UpperCamelCase__ = self.search(snake_case_ ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(snake_case_ , snake_case_ )
elif node.left is None: # Has only right children
self.__reassign_nodes(snake_case_ , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(snake_case_ , node.left )
else:
UpperCamelCase__ = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
UpperCamelCase__ = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Iterable:
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_=None ) -> Any:
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> None:
if node:
self.inorder(snake_case_ , node.left )
arr.append(node.value )
self.inorder(snake_case_ , node.right )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> int:
UpperCamelCase__ = []
self.inorder(snake_case_ , snake_case_ ) # append all values to list using inorder traversal
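        # inorder traversal of a BST visits values in ascending order, so the
        # k-th smallest element is simply arr[k - 1]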
return arr[k - 1]
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> list[Node]:
"""simple docstring"""
UpperCamelCase__ = []
if curr_node is not None:
UpperCamelCase__ = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def lowerCAmelCase_( ) -> None:
"""simple docstring"""
UpperCamelCase__ = (8, 3, 6, 1, 10, 14, 13, 4, 7)
UpperCamelCase__ = BinarySearchTree()
for i in testlist:
t.insert(SCREAMING_SNAKE_CASE )
# Prints all the elements of the list in order traversal
print(SCREAMING_SNAKE_CASE )
if t.search(6 ) is not None:
print('The value 6 exists' )
else:
print('The value 6 doesn\'t exist' )
if t.search(-1 ) is not None:
print('The value -1 exists' )
else:
print('The value -1 doesn\'t exist' )
if not t.empty():
print('Max Value: ' , t.get_max().value ) # type: ignore
print('Min Value: ' , t.get_min().value ) # type: ignore
for i in testlist:
t.remove(SCREAMING_SNAKE_CASE )
print(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 20 |
"""simple docstring"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE = 50_00_00_00 ) -> int:
"""simple docstring"""
UpperCamelCase__ = set()
UpperCamelCase__ = int((limit - 24) ** (1 / 2) )
UpperCamelCase__ = set(range(3 , prime_square_limit + 1 , 2 ) )
primes.add(2 )
for p in range(3 , prime_square_limit + 1 , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , prime_square_limit + 1 , SCREAMING_SNAKE_CASE ) ) )
for primea in primes:
UpperCamelCase__ = primea * primea
for primea in primes:
UpperCamelCase__ = primea * primea * primea
if square + cube >= limit - 16:
break
for primea in primes:
UpperCamelCase__ = primea * primea * primea * primea
UpperCamelCase__ = square + cube + tetr
if total >= limit:
break
ret.add(SCREAMING_SNAKE_CASE )
return len(SCREAMING_SNAKE_CASE )
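# sanity check: the smallest expressible total is 28 = 2**2 + 2**3 + 2**4,
# so solution(29) returns 1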
if __name__ == "__main__":
print(F"""{solution() = }""")
| 20 | 1 |
"""simple docstring"""
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> float:
"""simple docstring"""
return np.dot(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
class __lowerCamelCase :
    def __init__( self , *, snake_case_ = np.inf , snake_case_ = "linear" , snake_case_ = 0.0 , ) -> None:
UpperCamelCase__ = regularization
UpperCamelCase__ = gamma
if kernel == "linear":
UpperCamelCase__ = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError('rbf kernel requires gamma' )
if not isinstance(self.gamma , (float, int) ):
raise ValueError('gamma must be float or int' )
if not self.gamma > 0:
raise ValueError('gamma must be > 0' )
UpperCamelCase__ = self.__rbf
# in the future, there could be a default value like in sklearn
            # sklearn default: gamma = 1 / (n_features * X.var()) (per the sklearn docs)
# previously it was 1/(n_features)
else:
UpperCamelCase__ = F'Unknown kernel: {kernel}'
raise ValueError(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> float:
return np.dot(snake_case_ , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> float:
return np.exp(-(self.gamma * norm_squared(vectora - vectora )) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> None:
UpperCamelCase__ = observations
UpperCamelCase__ = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
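        # A hypothetical micro-example of what the dual computes: for
        # observations = [[-1.0], [1.0]], classes = [-1, 1] and a linear kernel,
        # to_minimize reduces to 1/2 * (l0 + l1)**2 - (l0 + l1) under -l0 + l1 = 0,
        # minimized at l0 = l1 = 0.5, which gives w = 1 and offset b = 0.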
        (UpperCamelCase__ , ) = np.shape(snake_case_ )
def to_minimize(snake_case_ ) -> float:
UpperCamelCase__ = 0
            (UpperCamelCase__ , ) = np.shape(snake_case_ )
for i in range(snake_case_ ):
for j in range(snake_case_ ):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] , observations[j] )
)
return 1 / 2 * s - sum(snake_case_ )
UpperCamelCase__ = LinearConstraint(snake_case_ , 0 , 0 )
UpperCamelCase__ = Bounds(0 , self.regularization )
UpperCamelCase__ = minimize(
snake_case_ , np.ones(snake_case_ ) , bounds=snake_case_ , constraints=[ly_contraint] ).x
UpperCamelCase__ = l_star
# calculating mean offset of separation plane to points
UpperCamelCase__ = 0
for i in range(snake_case_ ):
for j in range(snake_case_ ):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i] , observations[j] )
UpperCamelCase__ = s / n
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> int:
UpperCamelCase__ = sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n] , snake_case_ )
for n in range(len(self.classes ) ) )
return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
A__ : List[Any]= ["""bert-base-uncased""", """bert-base-cased"""]
A__ : Optional[int]= """hf-internal-testing/tiny-bert-tf-only"""
if is_tf_available():
class __lowerCamelCase ( tf.keras.Model ):
def __init__( self , snake_case_ ) -> Optional[int]:
super().__init__()
UpperCamelCase__ = tokenizer
UpperCamelCase__ = AutoConfig.from_pretrained(snake_case_ )
UpperCamelCase__ = TFAutoModel.from_config(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> int:
UpperCamelCase__ = self.tokenizer(snake_case_ )
UpperCamelCase__ = self.bert(**snake_case_ )
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class __lowerCamelCase ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
super().setUp()
UpperCamelCase__ = [
BertTokenizer.from_pretrained(snake_case_ ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
UpperCamelCase__ = [TFBertTokenizer.from_pretrained(snake_case_ ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(snake_case_ , use_fast_bert_tokenizer=snake_case_ )
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
UpperCamelCase__ = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
UpperCamelCase__ = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in (self.test_sentences, self.paired_sentences):
UpperCamelCase__ = tokenizer(snake_case_ , return_tensors='tf' , padding='longest' )
UpperCamelCase__ = tf_tokenizer(snake_case_ )
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
for tf_tokenizer in self.tf_tokenizers:
UpperCamelCase__ = tf_tokenizer(self.paired_sentences )
UpperCamelCase__ = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
for tf_tokenizer in self.tf_tokenizers:
UpperCamelCase__ = tf.function(snake_case_ )
for test_inputs in (self.test_sentences, self.paired_sentences):
UpperCamelCase__ = tf.constant(snake_case_ )
UpperCamelCase__ = compiled_tokenizer(snake_case_ )
UpperCamelCase__ = tf_tokenizer(snake_case_ )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
for tf_tokenizer in self.tf_tokenizers:
UpperCamelCase__ = ModelToSave(tokenizer=snake_case_ )
UpperCamelCase__ = tf.convert_to_tensor(self.test_sentences )
UpperCamelCase__ = model(snake_case_ ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
UpperCamelCase__ = Path(snake_case_ ) / 'saved.model'
model.save(snake_case_ )
UpperCamelCase__ = tf.keras.models.load_model(snake_case_ )
UpperCamelCase__ = loaded_model(snake_case_ )
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1E-5 )
| 20 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A__ : List[str]= {
"""configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""],
"""tokenization_roformer""": ["""RoFormerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : List[str]= ["""RoFormerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : int= [
"""ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoFormerForCausalLM""",
"""RoFormerForMaskedLM""",
"""RoFormerForMultipleChoice""",
"""RoFormerForQuestionAnswering""",
"""RoFormerForSequenceClassification""",
"""RoFormerForTokenClassification""",
"""RoFormerLayer""",
"""RoFormerModel""",
"""RoFormerPreTrainedModel""",
"""load_tf_weights_in_roformer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Union[str, Any]= [
"""TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRoFormerForCausalLM""",
"""TFRoFormerForMaskedLM""",
"""TFRoFormerForMultipleChoice""",
"""TFRoFormerForQuestionAnswering""",
"""TFRoFormerForSequenceClassification""",
"""TFRoFormerForTokenClassification""",
"""TFRoFormerLayer""",
"""TFRoFormerModel""",
"""TFRoFormerPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : int= [
"""FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxRoFormerForMaskedLM""",
"""FlaxRoFormerForMultipleChoice""",
"""FlaxRoFormerForQuestionAnswering""",
"""FlaxRoFormerForSequenceClassification""",
"""FlaxRoFormerForTokenClassification""",
"""FlaxRoFormerModel""",
"""FlaxRoFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
A__ : Optional[Any]= _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 20 |
"""simple docstring"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> list[int]:
"""simple docstring"""
UpperCamelCase__ = len(SCREAMING_SNAKE_CASE )
for i in range(SCREAMING_SNAKE_CASE ):
for j in range(i + 1 , SCREAMING_SNAKE_CASE ):
if numbers[j] < numbers[i]:
UpperCamelCase__ , UpperCamelCase__ = numbers[j], numbers[i]
return numbers
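# e.g. [3, 1, 2]: i = 0 swaps 3 and 1 -> [1, 3, 2]; i = 1 swaps 3 and 2 -> [1, 2, 3]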
if __name__ == "__main__":
A__ : Union[str, Any]= input("""Enter numbers separated by a comma:\n""").strip()
A__ : List[Any]= [int(item) for item in user_input.split(""",""")]
print(exchange_sort(unsorted))
| 20 | 1 |
"""simple docstring"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE = 50_00_00_00 ) -> int:
"""simple docstring"""
UpperCamelCase__ = set()
UpperCamelCase__ = int((limit - 24) ** (1 / 2) )
UpperCamelCase__ = set(range(3 , prime_square_limit + 1 , 2 ) )
primes.add(2 )
for p in range(3 , prime_square_limit + 1 , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , prime_square_limit + 1 , SCREAMING_SNAKE_CASE ) ) )
for primea in primes:
UpperCamelCase__ = primea * primea
for primea in primes:
UpperCamelCase__ = primea * primea * primea
if square + cube >= limit - 16:
break
for primea in primes:
UpperCamelCase__ = primea * primea * primea * primea
UpperCamelCase__ = square + cube + tetr
if total >= limit:
break
ret.add(SCREAMING_SNAKE_CASE )
return len(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 20 |
"""simple docstring"""
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
A__ : str= {
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
A__ : str= {
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
UpperCamelCase__ = (images / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase__ = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
UpperCamelCase__ = numpy_to_pil(SCREAMING_SNAKE_CASE )
return images
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
if images.ndim == 3:
UpperCamelCase__ = images[None, ...]
UpperCamelCase__ = (images * 2_55).round().astype('uint8' )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
UpperCamelCase__ = [Image.fromarray(image.squeeze() , mode='L' ) for image in images]
else:
UpperCamelCase__ = [Image.fromarray(SCREAMING_SNAKE_CASE ) for image in images]
return pil_images
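# Minimal usage sketch (assumes a torch tensor shaped (batch, channels, height, width)
# with values in [-1, 1], as produced by a diffusion decoder):
#     import torch
#     fake_batch = torch.rand(2, 3, 64, 64) * 2 - 1
#     pil_images = pt_to_pil(fake_batch)   # -> list of two 64x64 RGB PIL images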
| 20 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __lowerCamelCase ( _a , _a , _a , unittest.TestCase ):
a : Tuple =StableDiffusionInstructPixaPixPipeline
a : Optional[Any] =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width""", """cross_attention_kwargs"""}
a : Any =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
a : Dict =IMAGE_TO_IMAGE_IMAGE_PARAMS
a : List[str] =IMAGE_TO_IMAGE_IMAGE_PARAMS
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
torch.manual_seed(0 )
UpperCamelCase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
UpperCamelCase__ = PNDMScheduler(skip_prk_steps=snake_case_ )
torch.manual_seed(0 )
UpperCamelCase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCamelCase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
UpperCamelCase__ = CLIPTextModel(snake_case_ )
UpperCamelCase__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
UpperCamelCase__ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_=0 ) -> Union[str, Any]:
UpperCamelCase__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(snake_case_ ) ).to(snake_case_ )
UpperCamelCase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCamelCase__ = Image.fromarray(np.uinta(snake_case_ ) ).convert('RGB' )
if str(snake_case_ ).startswith('mps' ):
UpperCamelCase__ = torch.manual_seed(snake_case_ )
else:
UpperCamelCase__ = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
UpperCamelCase__ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'image_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ = self.get_dummy_components()
UpperCamelCase__ = StableDiffusionInstructPixaPixPipeline(**snake_case_ )
UpperCamelCase__ = sd_pipe.to(snake_case_ )
sd_pipe.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase__ = self.get_dummy_inputs(snake_case_ )
UpperCamelCase__ = sd_pipe(**snake_case_ ).images
UpperCamelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCamelCase__ = np.array([0.7_526, 0.3_750, 0.4_547, 0.6_117, 0.5_866, 0.5_016, 0.4_327, 0.5_642, 0.4_815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ = self.get_dummy_components()
UpperCamelCase__ = StableDiffusionInstructPixaPixPipeline(**snake_case_ )
UpperCamelCase__ = sd_pipe.to(snake_case_ )
sd_pipe.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase__ = self.get_dummy_inputs(snake_case_ )
UpperCamelCase__ = 'french fries'
UpperCamelCase__ = sd_pipe(**snake_case_ , negative_prompt=snake_case_ )
UpperCamelCase__ = output.images
UpperCamelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCamelCase__ = np.array([0.7_511, 0.3_642, 0.4_553, 0.6_236, 0.5_797, 0.5_013, 0.4_343, 0.5_611, 0.4_831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ = self.get_dummy_components()
UpperCamelCase__ = StableDiffusionInstructPixaPixPipeline(**snake_case_ )
UpperCamelCase__ = sd_pipe.to(snake_case_ )
sd_pipe.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase__ = self.get_dummy_inputs(snake_case_ )
UpperCamelCase__ = [inputs['prompt']] * 2
UpperCamelCase__ = np.array(inputs['image'] ).astype(np.floataa ) / 255.0
UpperCamelCase__ = torch.from_numpy(snake_case_ ).unsqueeze(0 ).to(snake_case_ )
UpperCamelCase__ = image / 2 + 0.5
UpperCamelCase__ = image.permute(0 , 3 , 1 , 2 )
UpperCamelCase__ = image.repeat(2 , 1 , 1 , 1 )
UpperCamelCase__ = sd_pipe(**snake_case_ ).images
UpperCamelCase__ = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
UpperCamelCase__ = np.array([0.5_812, 0.5_748, 0.5_222, 0.5_908, 0.5_695, 0.7_174, 0.6_804, 0.5_523, 0.5_579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
UpperCamelCase__ = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ = self.get_dummy_components()
UpperCamelCase__ = EulerAncestralDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' )
UpperCamelCase__ = StableDiffusionInstructPixaPixPipeline(**snake_case_ )
UpperCamelCase__ = sd_pipe.to(snake_case_ )
sd_pipe.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase__ = self.get_dummy_inputs(snake_case_ )
UpperCamelCase__ = sd_pipe(**snake_case_ ).images
UpperCamelCase__ = image[0, -3:, -3:, -1]
        UpperCamelCase__ = [round(x , 4 ) for x in image_slice.flatten().tolist()]
        print(','.join([str(x ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
UpperCamelCase__ = np.array([0.7_417, 0.3_842, 0.4_732, 0.5_776, 0.5_891, 0.5_139, 0.4_052, 0.5_673, 0.4_986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
UpperCamelCase__ = self.get_dummy_components()
UpperCamelCase__ = StableDiffusionInstructPixaPixPipeline(**snake_case_ )
UpperCamelCase__ = VaeImageProcessor(do_resize=snake_case_ , do_normalize=snake_case_ )
UpperCamelCase__ = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase__ = pipe(**self.get_dummy_inputs_by_type(snake_case_ , input_image_type='pt' ) )[0]
UpperCamelCase__ = components['vae']
UpperCamelCase__ = self.get_dummy_inputs_by_type(snake_case_ , input_image_type='pt' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
UpperCamelCase__ = vae.encode(inputs[image_param] ).latent_dist.mode()
UpperCamelCase__ = pipe(**snake_case_ )[0]
UpperCamelCase__ = np.abs(out - out_latents_inputs ).max()
self.assertLess(snake_case_ , 1E-4 , 'passing latents as image input generate different result from passing image' )
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self , snake_case_=0 ) -> Tuple:
UpperCamelCase__ = torch.manual_seed(snake_case_ )
UpperCamelCase__ = load_image(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' )
UpperCamelCase__ = {
'prompt': 'turn him into a cyborg',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'image_guidance_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=snake_case_ )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
pipe.enable_attention_slicing()
UpperCamelCase__ = self.get_inputs()
UpperCamelCase__ = pipe(**snake_case_ ).images
UpperCamelCase__ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
UpperCamelCase__ = np.array([0.5_902, 0.6_015, 0.6_027, 0.5_983, 0.6_092, 0.6_061, 0.5_765, 0.5_785, 0.5_555] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
UpperCamelCase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=snake_case_ )
UpperCamelCase__ = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
pipe.enable_attention_slicing()
UpperCamelCase__ = self.get_inputs()
UpperCamelCase__ = pipe(**snake_case_ ).images
UpperCamelCase__ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
UpperCamelCase__ = np.array([0.6_578, 0.6_817, 0.6_972, 0.6_761, 0.6_856, 0.6_916, 0.6_428, 0.6_516, 0.6_301] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
UpperCamelCase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=snake_case_ )
UpperCamelCase__ = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
pipe.enable_attention_slicing()
UpperCamelCase__ = self.get_inputs()
UpperCamelCase__ = pipe(**snake_case_ ).images
UpperCamelCase__ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
UpperCamelCase__ = np.array([0.3_828, 0.3_834, 0.3_818, 0.3_792, 0.3_865, 0.3_752, 0.3_792, 0.3_847, 0.3_753] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
UpperCamelCase__ = 0
def callback_fn(snake_case_ , snake_case_ , snake_case_ ) -> None:
UpperCamelCase__ = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
UpperCamelCase__ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
UpperCamelCase__ = latents[0, -3:, -3:, -1]
UpperCamelCase__ = np.array([-0.2_463, -0.4_644, -0.9_756, 1.5_176, 1.4_414, 0.7_866, 0.9_897, 0.8_521, 0.7_983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
UpperCamelCase__ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
UpperCamelCase__ = latents[0, -3:, -3:, -1]
UpperCamelCase__ = np.array([-0.2_644, -0.4_626, -0.9_653, 1.5_176, 1.4_551, 0.7_686, 0.9_805, 0.8_452, 0.8_115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
UpperCamelCase__ = False
UpperCamelCase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=snake_case_ , torch_dtype=torch.floataa )
UpperCamelCase__ = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
pipe.enable_attention_slicing()
UpperCamelCase__ = self.get_inputs()
pipe(**snake_case_ , callback=snake_case_ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCamelCase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=snake_case_ , torch_dtype=torch.floataa )
UpperCamelCase__ = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCamelCase__ = self.get_inputs()
UpperCamelCase__ = pipe(**snake_case_ )
UpperCamelCase__ = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
UpperCamelCase__ = inputs['image'].resize((504, 504) )
UpperCamelCase__ = 'timbrooks/instruct-pix2pix'
UpperCamelCase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
snake_case_ , safety_checker=snake_case_ , )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
pipe.enable_attention_slicing()
UpperCamelCase__ = pipe(**snake_case_ )
UpperCamelCase__ = output.images[0]
UpperCamelCase__ = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
UpperCamelCase__ = np.array([0.2_726, 0.2_529, 0.2_664, 0.2_655, 0.2_641, 0.2_642, 0.2_591, 0.2_649, 0.2_590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
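# Rough shape of the real call these slow tests exercise, for orientation (checkpoint id
# and argument names taken from the tests above; `init_image` stands in for any PIL image):
#
#     pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained("timbrooks/instruct-pix2pix", safety_checker=None)
#     pipe = pipe.to("cuda")
#     result = pipe(prompt="turn him into a cyborg", image=init_image,
#                   num_inference_steps=3, guidance_scale=7.5, image_guidance_scale=1.0)
#     result.images[0].save("cyborg.png")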
| 20 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
A__ : Dict= logging.get_logger(__name__)
A__ : str= {"""vocab_file""": """spiece.model"""}
A__ : Union[str, Any]= {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
}
}
# TODO(PVP) - this should be removed in Transformers v5
A__ : Union[str, Any]= {
"""t5-small""": 5_12,
"""t5-base""": 5_12,
"""t5-large""": 5_12,
"""t5-3b""": 5_12,
"""t5-11b""": 5_12,
}
A__ : Optional[Any]= """▁"""
class __lowerCamelCase ( _a ):
a : Dict =VOCAB_FILES_NAMES
a : str =PRETRAINED_VOCAB_FILES_MAP
a : Union[str, Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : List[str] =["""input_ids""", """attention_mask"""]
def __init__( self , snake_case_ , snake_case_="</s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_=100 , snake_case_=None , snake_case_ = None , snake_case_=True , **snake_case_ , ) -> None:
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
UpperCamelCase__ = [F'<extra_id_{i}>' for i in range(snake_case_ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
UpperCamelCase__ = len(set(filter(lambda snake_case_ : bool('extra_id' in str(snake_case_ ) ) , snake_case_ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
' tokens' )
if legacy:
logger.warning_once(
F'You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'
' read the related pull request available at https://github.com/huggingface/transformers/pull/24565' )
UpperCamelCase__ = legacy
UpperCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , extra_ids=snake_case_ , additional_special_tokens=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , legacy=snake_case_ , **snake_case_ , )
UpperCamelCase__ = vocab_file
UpperCamelCase__ = extra_ids
UpperCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case_ )
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case_ , snake_case_ , snake_case_ ) -> Optional[Any]:
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
UpperCamelCase__ = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'This tokenizer was incorrectly instantiated with a model max length of'
F' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
F' {pretrained_model_name_or_path} automatically truncating your input to'
F' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
F' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
' instantiate this tokenizer with `model_max_length` set to your preferred value.' , snake_case_ , )
return max_model_length
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
return self.sp_model.get_piece_size() + self._extra_ids
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = {self.convert_ids_to_tokens(snake_case_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None , snake_case_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(snake_case_ )) + [1]
return ([0] * len(snake_case_ )) + [1] + ([0] * len(snake_case_ )) + [1]
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
return list(
set(filter(lambda snake_case_ : bool(re.search(r'<extra_id_\d+>' , snake_case_ ) ) is not None , self.additional_special_tokens ) ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
return [self._convert_token_to_id(snake_case_ ) for token in self.get_sentinel_tokens()]
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> List[int]:
if len(snake_case_ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
' eos tokens being added.' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None ) -> List[int]:
UpperCamelCase__ = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None ) -> List[int]:
UpperCamelCase__ = self._add_eos_if_not_present(snake_case_ )
if token_ids_a is None:
return token_ids_a
else:
UpperCamelCase__ = self._add_eos_if_not_present(snake_case_ )
return token_ids_a + token_ids_a
def __getstate__( self ) -> str:
UpperCamelCase__ = self.__dict__.copy()
UpperCamelCase__ = None
return state
def __setstate__( self , snake_case_ ) -> Any:
UpperCamelCase__ = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
UpperCamelCase__ = {}
UpperCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , **snake_case_ ) -> List[str]:
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
UpperCamelCase__ = SPIECE_UNDERLINE + text.replace(snake_case_ , ' ' )
return super().tokenize(snake_case_ , **snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , **snake_case_ ) -> List[Any]:
if not self.legacy:
UpperCamelCase__ = text.startswith(snake_case_ )
if is_first:
UpperCamelCase__ = text[1:]
UpperCamelCase__ = self.sp_model.encode(snake_case_ , out_type=snake_case_ )
if not self.legacy and not is_first and not text.startswith(' ' ) and tokens[0].startswith(snake_case_ ):
UpperCamelCase__ = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> List[str]:
if token.startswith('<extra_id_' ):
UpperCamelCase__ = re.match(r'<extra_id_(\d+)>' , snake_case_ )
UpperCamelCase__ = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Optional[int]:
if index < self.sp_model.get_piece_size():
UpperCamelCase__ = self.sp_model.IdToPiece(snake_case_ )
else:
UpperCamelCase__ = F'<extra_id_{self.vocab_size - 1 - index}>'
return token
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> List[str]:
UpperCamelCase__ = []
UpperCamelCase__ = ''
UpperCamelCase__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(snake_case_ ) + token
UpperCamelCase__ = True
UpperCamelCase__ = []
else:
current_sub_tokens.append(snake_case_ )
UpperCamelCase__ = False
out_string += self.sp_model.decode(snake_case_ )
return out_string.strip()
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]:
if not os.path.isdir(snake_case_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
UpperCamelCase__ = os.path.join(
snake_case_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case_ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case_ , 'wb' ) as fi:
UpperCamelCase__ = self.sp_model.serialized_model_proto()
fi.write(snake_case_ )
return (out_vocab_file,)
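# Sentinel-id arithmetic used above, illustrated: the extra ids occupy the top of the
# vocabulary, so with the default 100 extra ids "<extra_id_0>" maps to vocab_size - 1,
# "<extra_id_1>" to vocab_size - 2, and so on (mirroring the converters above, which
# compute vocab_size - num - 1 and invert it when decoding an id back to a token).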
| 20 | 1 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    """simple docstring"""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = F'Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}'
        raise ValueError(msg)
    if cols2 != 1:
        msg = F'Constant matrix must be nx1 but received {rows2}x{cols2}'
        raise ValueError(msg)
    if rows1 != rows2:
        msg = (
            'Coefficient and constant matrices dimensions must be nxn and nx1 but '
            F'received {rows1}x{cols1} and {rows2}x{cols2}'
        )
        raise ValueError(msg)
    if len(init_val) != rows1:
        msg = (
            'Number of initial values must be equal to number of rows in coefficient '
            F'matrix but received {len(init_val)} and {rows1}'
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError('Iterations must be at least 1')
    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape
    strictly_diagonally_dominant(table)
    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val
    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """simple docstring"""
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError('Coefficient matrix is not strictly diagonally dominant')
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
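# Worked example (a minimal sketch, using numpy as imported above): solve
#     4x +  y =  2
#      x + 3y = -6
# starting from (0, 0); the matrix is strictly diagonally dominant, so Jacobi converges.
#
#     coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
#     constant = np.array([[2.0], [-6.0]])
#     jacobi_iteration_method(coefficient, constant, [0.0, 0.0], iterations=25)
#     # -> approximately [1.0909, -2.3636]  (exact solution is 12/11, -26/11)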
| 20 |
"""simple docstring"""
from __future__ import annotations
from typing import Generic, TypeVar
A__ : Any= TypeVar("""T""")
class DisjointSetTreeNode(Generic[T]):
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper function for union operation
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])
        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)
        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
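# Small end-to-end sketch (class and method names as defined above):
#
#     g = GraphUndirectedWeighted[int]()
#     g.add_edge(1, 2, 1)
#     g.add_edge(2, 3, 2)
#     g.add_edge(1, 3, 3)   # heaviest edge in the cycle, so Kruskal drops it
#     mst = g.kruskal()
#     # mst.connections == {1: {2: 1}, 2: {1: 1, 3: 2}, 3: {2: 2}}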
| 20 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __lowerCamelCase ( _a , unittest.TestCase ):
a : str =KandinskyVaaPriorPipeline
a : Optional[int] =["""prompt"""]
a : str =["""prompt""", """negative_prompt"""]
a : Dict =[
"""num_images_per_prompt""",
"""generator""",
"""num_inference_steps""",
"""latents""",
"""negative_prompt""",
"""guidance_scale""",
"""output_type""",
"""return_dict""",
]
a : Union[str, Any] =False
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
return 32
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
return 32
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
return self.time_input_dim
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
return self.time_input_dim * 4
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
return 100
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
torch.manual_seed(0 )
UpperCamelCase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(snake_case_ )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
torch.manual_seed(0 )
UpperCamelCase__ = {
'num_attention_heads': 2,
'attention_head_dim': 12,
'embedding_dim': self.text_embedder_hidden_size,
'num_layers': 1,
}
UpperCamelCase__ = PriorTransformer(**snake_case_ )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
UpperCamelCase__ = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
torch.manual_seed(0 )
UpperCamelCase__ = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
UpperCamelCase__ = CLIPVisionModelWithProjection(snake_case_ )
return model
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = CLIPImageProcessor(
crop_size=224 , do_center_crop=snake_case_ , do_normalize=snake_case_ , do_resize=snake_case_ , image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073] , image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711] , resample=3 , size=224 , )
return image_processor
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.dummy_prior
UpperCamelCase__ = self.dummy_image_encoder
UpperCamelCase__ = self.dummy_text_encoder
UpperCamelCase__ = self.dummy_tokenizer
UpperCamelCase__ = self.dummy_image_processor
UpperCamelCase__ = UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1000 , clip_sample=snake_case_ , clip_sample_range=10.0 , )
UpperCamelCase__ = {
'prior': prior,
'image_encoder': image_encoder,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'scheduler': scheduler,
'image_processor': image_processor,
}
return components
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_=0 ) -> List[Any]:
if str(snake_case_ ).startswith('mps' ):
UpperCamelCase__ = torch.manual_seed(snake_case_ )
else:
UpperCamelCase__ = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
UpperCamelCase__ = {
'prompt': 'horse',
'generator': generator,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = 'cpu'
UpperCamelCase__ = self.get_dummy_components()
UpperCamelCase__ = self.pipeline_class(**snake_case_ )
UpperCamelCase__ = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase__ = pipe(**self.get_dummy_inputs(snake_case_ ) )
UpperCamelCase__ = output.image_embeds
UpperCamelCase__ = pipe(
**self.get_dummy_inputs(snake_case_ ) , return_dict=snake_case_ , )[0]
UpperCamelCase__ = image[0, -10:]
UpperCamelCase__ = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
UpperCamelCase__ = np.array(
[-0.0_532, 1.7_120, 0.3_656, -1.0_852, -0.8_946, -1.1_756, 0.4_348, 0.2_482, 0.5_146, -0.1_156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
UpperCamelCase__ = torch_device == 'cpu'
UpperCamelCase__ = True
UpperCamelCase__ = False
self._test_inference_batch_single_identical(
test_max_difference=snake_case_ , relax_max_difference=snake_case_ , test_mean_pixel_difference=snake_case_ , )
@skip_mps
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
UpperCamelCase__ = torch_device == 'cpu'
UpperCamelCase__ = False
self._test_attention_slicing_forward_pass(
test_max_difference=snake_case_ , test_mean_pixel_difference=snake_case_ , )
| 20 |
"""simple docstring"""
A__ : Tuple= """Alexander Joslin"""
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    """simple docstring"""
    operators = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2, num1)
            operand_stack.push(total)
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
A__ : int= """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 20 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
A__ : Optional[int]= logging.get_logger(__name__)
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
UpperCamelCase__ = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Tuple:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
UpperCamelCase__ = ''
else:
UpperCamelCase__ = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCamelCase__ = state_dict.pop(F'blocks.{i}.attn.qkv.weight' )
UpperCamelCase__ = state_dict.pop(F'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ = in_proj_weight[
: config.hidden_size, :
]
UpperCamelCase__ = in_proj_bias[: config.hidden_size]
UpperCamelCase__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCamelCase__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCamelCase__ = in_proj_weight[
-config.hidden_size :, :
]
UpperCamelCase__ = in_proj_bias[-config.hidden_size :]
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
UpperCamelCase__ = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ = dct.pop(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = val
def lowerCAmelCase_( ) -> int:
"""simple docstring"""
UpperCamelCase__ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
UpperCamelCase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ = ViTConfig()
UpperCamelCase__ = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
UpperCamelCase__ = True
UpperCamelCase__ = int(vit_name[-12:-10] )
UpperCamelCase__ = int(vit_name[-9:-6] )
else:
UpperCamelCase__ = 10_00
UpperCamelCase__ = 'huggingface/label-files'
UpperCamelCase__ = 'imagenet-1k-id2label.json'
UpperCamelCase__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
UpperCamelCase__ = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
UpperCamelCase__ = idalabel
UpperCamelCase__ = {v: k for k, v in idalabel.items()}
UpperCamelCase__ = int(vit_name[-6:-4] )
UpperCamelCase__ = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith('tiny' ):
UpperCamelCase__ = 1_92
UpperCamelCase__ = 7_68
UpperCamelCase__ = 12
UpperCamelCase__ = 3
elif vit_name[9:].startswith('small' ):
UpperCamelCase__ = 3_84
UpperCamelCase__ = 15_36
UpperCamelCase__ = 12
UpperCamelCase__ = 6
else:
pass
else:
if vit_name[4:].startswith('small' ):
UpperCamelCase__ = 7_68
UpperCamelCase__ = 23_04
UpperCamelCase__ = 8
UpperCamelCase__ = 8
elif vit_name[4:].startswith('base' ):
pass
elif vit_name[4:].startswith('large' ):
UpperCamelCase__ = 10_24
UpperCamelCase__ = 40_96
UpperCamelCase__ = 24
UpperCamelCase__ = 16
elif vit_name[4:].startswith('huge' ):
UpperCamelCase__ = 12_80
UpperCamelCase__ = 51_20
UpperCamelCase__ = 32
UpperCamelCase__ = 16
# load original model from timm
UpperCamelCase__ = timm.create_model(SCREAMING_SNAKE_CASE , pretrained=SCREAMING_SNAKE_CASE )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
UpperCamelCase__ = timm_model.state_dict()
if base_model:
remove_classification_head_(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = create_rename_keys(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
read_in_q_k_v(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# load HuggingFace model
if vit_name[-5:] == "in21k":
UpperCamelCase__ = ViTModel(SCREAMING_SNAKE_CASE ).eval()
else:
UpperCamelCase__ = ViTForImageClassification(SCREAMING_SNAKE_CASE ).eval()
model.load_state_dict(SCREAMING_SNAKE_CASE )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
UpperCamelCase__ = DeiTImageProcessor(size=config.image_size )
else:
UpperCamelCase__ = ViTImageProcessor(size=config.image_size )
UpperCamelCase__ = image_processor(images=prepare_img() , return_tensors='pt' )
UpperCamelCase__ = encoding['pixel_values']
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE )
if base_model:
UpperCamelCase__ = timm_model.forward_features(SCREAMING_SNAKE_CASE )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(SCREAMING_SNAKE_CASE , outputs.pooler_output , atol=1E-3 )
else:
UpperCamelCase__ = timm_model(SCREAMING_SNAKE_CASE )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(SCREAMING_SNAKE_CASE , outputs.logits , atol=1E-3 )
Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE )
print(F'Saving model {vit_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
A__ : List[Any]= argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_patch16_224""",
type=str,
help="""Name of the ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
A__ : Dict= parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
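# Example invocation sketch (flags as declared above; the script file name is assumed):
#     python convert_vit_timm_to_pytorch.py --vit_name vit_base_patch16_224 \
#         --pytorch_dump_folder_path ./vit-base-patch16-224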
| 20 |
"""simple docstring"""
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
A__ : Any= """src/diffusers"""
# Matches is_xxx_available()
A__ : Tuple= re.compile(r"""is\_([a-z_]*)_available\(\)""")
# Matches from xxx import bla
A__ : Any= re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
A__ : Optional[Any]= """
{0} = None
"""
A__ : List[Any]= """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
"""
A__ : Dict= """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ = _re_backend.findall(SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) == 0:
return None
return "_and_".join(SCREAMING_SNAKE_CASE )
def lowerCAmelCase_( ) -> str:
"""simple docstring"""
with open(os.path.join(SCREAMING_SNAKE_CASE , '__init__.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
UpperCamelCase__ = f.readlines()
# Get to the point we do the actual imports for type checking
UpperCamelCase__ = 0
UpperCamelCase__ = {}
# Go through the end of the file
while line_index < len(SCREAMING_SNAKE_CASE ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
UpperCamelCase__ = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith('else:' ):
line_index += 1
line_index += 1
UpperCamelCase__ = []
# Until we unindent, add backend objects to the list
while line_index < len(SCREAMING_SNAKE_CASE ) and len(lines[line_index] ) > 1:
UpperCamelCase__ = lines[line_index]
UpperCamelCase__ = _re_single_line_import.search(SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(SCREAMING_SNAKE_CASE ) > 0:
UpperCamelCase__ = objects
else:
line_index += 1
return backend_specific_objects
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
if name.isupper():
return DUMMY_CONSTANT.format(SCREAMING_SNAKE_CASE )
elif name.islower():
return DUMMY_FUNCTION.format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
return DUMMY_CLASS.format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE=None ) -> int:
"""simple docstring"""
if backend_specific_objects is None:
UpperCamelCase__ = read_init()
# For special correspondence backend to module name as used in the function requires_modulename
UpperCamelCase__ = {}
for backend, objects in backend_specific_objects.items():
UpperCamelCase__ = '[' + ', '.join(F'"{b}"' for b in backend.split('_and_' ) ) + ']'
UpperCamelCase__ = '# This file is autogenerated by the command `make fix-copies`, do not edit.\n'
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for o in objects] )
UpperCamelCase__ = dummy_file
return dummy_files
def lowerCAmelCase_( SCREAMING_SNAKE_CASE=False ) -> int:
"""simple docstring"""
UpperCamelCase__ = create_dummy_files()
# For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
UpperCamelCase__ = {'torch': 'pt'}
# Locate actual dummy modules and read their content.
UpperCamelCase__ = os.path.join(SCREAMING_SNAKE_CASE , 'utils' )
UpperCamelCase__ = {
backend: os.path.join(SCREAMING_SNAKE_CASE , F'dummy_{short_names.get(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}_objects.py' )
for backend in dummy_files.keys()
}
UpperCamelCase__ = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(SCREAMING_SNAKE_CASE ):
with open(SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' , newline='\n' ) as f:
UpperCamelCase__ = f.read()
else:
UpperCamelCase__ = ''
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F'Updating diffusers.utils.dummy_{short_names.get(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}_objects.py as the main '
'__init__ has new objects.' )
with open(dummy_file_paths[backend] , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
'The main __init__ has objects that are not present in '
F'diffusers.utils.dummy_{short_names.get(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}_objects.py. Run `make fix-copies` '
'to fix this.' )
if __name__ == "__main__":
A__ : Any= argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
A__ : Optional[int]= parser.parse_args()
check_dummies(args.fix_and_overwrite)
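# For illustration: if the torch backend exposes an object named UNet2DModel, the
# DUMMY_CLASS template above renders roughly as
#
#     class UNet2DModel(metaclass=DummyObject):
#         _backends = ["torch"]
#
#         def __init__(self, *args, **kwargs):
#             requires_backends(self, ["torch"])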
| 20 | 1 |
"""simple docstring"""
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    """simple docstring"""
    if not isinstance(number, int):
        msg = F'Input value of [number={number}] must be an integer'
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
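# Examples (assuming the usual behaviour of maths.prime_check.is_prime):
#     twin_prime(3)  # -> 5, since 3 and 5 are both prime
#     twin_prime(4)  # -> -1, since 4 is not prime
#     twin_prime(7)  # -> -1, since 9 is not prime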
| 20 |
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
A__ : Optional[Any]= """Run commands across TPU VMs for initial setup before running `accelerate launch`."""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE=None ) -> Dict:
"""simple docstring"""
if subparsers is not None:
UpperCamelCase__ = subparsers.add_parser('tpu-config' , description=_description )
else:
UpperCamelCase__ = argparse.ArgumentParser('Accelerate tpu-config command' , description=_description )
# Core arguments
UpperCamelCase__ = parser.add_argument_group(
'Config Arguments' , 'Arguments that can be configured through `accelerate config`.' )
config_args.add_argument(
'--config_file' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='Path to the config file to use for accelerate.' , )
config_args.add_argument(
'--tpu_name' , default=SCREAMING_SNAKE_CASE , help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.' , )
config_args.add_argument(
'--tpu_zone' , default=SCREAMING_SNAKE_CASE , help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.' , )
UpperCamelCase__ = parser.add_argument_group('TPU Arguments' , 'Arguments for options ran inside the TPU.' )
pod_args.add_argument(
'--use_alpha' , action='store_true' , help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.' , )
pod_args.add_argument(
'--command_file' , default=SCREAMING_SNAKE_CASE , help='The path to the file containing the commands to run on the pod on startup.' , )
pod_args.add_argument(
'--command' , action='append' , nargs='+' , help='A command to run on the pod. Can be passed multiple times.' , )
pod_args.add_argument(
'--install_accelerate' , action='store_true' , help='Whether to install accelerate on the pod. Defaults to False.' , )
pod_args.add_argument(
'--accelerate_version' , default='latest' , help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.' , )
pod_args.add_argument(
'--debug' , action='store_true' , help='If set, will print the command that would be run instead of running it.' )
if subparsers is not None:
parser.set_defaults(func=SCREAMING_SNAKE_CASE )
return parser
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
UpperCamelCase__ = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
UpperCamelCase__ = defaults.command_file
if not args.command and defaults.commands is not None:
UpperCamelCase__ = defaults.commands
if not args.tpu_name:
UpperCamelCase__ = defaults.tpu_name
if not args.tpu_zone:
UpperCamelCase__ = defaults.tpu_zone
if args.accelerate_version == "dev":
UpperCamelCase__ = 'git+https://github.com/huggingface/accelerate.git'
elif args.accelerate_version == "latest":
UpperCamelCase__ = 'accelerate -U'
elif isinstance(parse(args.accelerate_version ) , SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = F'accelerate=={args.accelerate_version}'
if not args.command_file and not args.command:
raise ValueError('You must specify either a command file or a command to run on the pod.' )
if args.command_file:
with open(args.command_file , 'r' ) as f:
UpperCamelCase__ = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
UpperCamelCase__ = ['cd /usr/share']
if args.install_accelerate:
new_cmd += [F'pip install {args.accelerate_version}']
new_cmd += args.command
UpperCamelCase__ = '; '.join(SCREAMING_SNAKE_CASE )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
UpperCamelCase__ = ['gcloud']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(F'Running {" ".join(SCREAMING_SNAKE_CASE )}' )
return
subprocess.run(SCREAMING_SNAKE_CASE )
print('Successfully setup pod.' )
def lowerCAmelCase_( ) -> int:
"""simple docstring"""
UpperCamelCase__ = tpu_command_parser()
UpperCamelCase__ = parser.parse_args()
tpu_command_launcher(SCREAMING_SNAKE_CASE )
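# Example invocation sketch (subcommand and flags as registered above):
#     accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#         --command "pip install -r requirements.txt" --install_accelerate --debug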
| 20 | 1 |
"""simple docstring"""
A__ : Tuple= """Alexander Joslin"""
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    """simple docstring"""
    operators = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2, num1)
            operand_stack.push(total)
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
A__ : int= """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 20 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : List[str]= logging.get_logger(__name__)
class __lowerCamelCase ( _a ):
a : Optional[int] ="""timm_backbone"""
def __init__( self , snake_case_=None , snake_case_=3 , snake_case_=True , snake_case_=True , snake_case_=None , **snake_case_ , ) -> Dict:
super().__init__(**snake_case_ )
UpperCamelCase__ = backbone
UpperCamelCase__ = num_channels
UpperCamelCase__ = features_only
UpperCamelCase__ = use_pretrained_backbone
UpperCamelCase__ = True
UpperCamelCase__ = out_indices if out_indices is not None else (-1,)
| 20 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ : Union[str, Any]= logging.get_logger(__name__)
A__ : Tuple= {
"""hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __lowerCamelCase ( _a ):
a : Tuple ="""yolos"""
def __init__( self , snake_case_=768 , snake_case_=12 , snake_case_=12 , snake_case_=3072 , snake_case_="gelu" , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.02 , snake_case_=1E-12 , snake_case_=[512, 864] , snake_case_=16 , snake_case_=3 , snake_case_=True , snake_case_=100 , snake_case_=True , snake_case_=False , snake_case_=1 , snake_case_=5 , snake_case_=2 , snake_case_=5 , snake_case_=2 , snake_case_=0.1 , **snake_case_ , ) -> str:
super().__init__(**snake_case_ )
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = initializer_range
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = image_size
UpperCamelCase__ = patch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = qkv_bias
UpperCamelCase__ = num_detection_tokens
UpperCamelCase__ = use_mid_position_embeddings
UpperCamelCase__ = auxiliary_loss
# Hungarian matcher
UpperCamelCase__ = class_cost
UpperCamelCase__ = bbox_cost
UpperCamelCase__ = giou_cost
# Loss coefficients
UpperCamelCase__ = bbox_loss_coefficient
UpperCamelCase__ = giou_loss_coefficient
UpperCamelCase__ = eos_coefficient
class __lowerCamelCase ( _a ):
a : int =version.parse("""1.11""" )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> float:
return 1E-4
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
return 12
| 20 |
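# Sketch: what the dynamic-axes mapping returned by the ONNX config above encodes.
# Each entry names the dimensions of an input that may vary at export time; this
# just spells the mapping out and touches no exporter machinery.
from collections import OrderedDict

dynamic_axes = OrderedDict(
    [("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"})]
)
for input_name, axes in dynamic_axes.items():
    print(input_name, "->", ", ".join(f"dim {i}: {label}" for i, label in axes.items()))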
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
A__ : Any= logging.get_logger(__name__)
A__ : str= {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class __lowerCamelCase ( _a ):
a : List[str] ="""layoutlmv3"""
def __init__( self , snake_case_=5_0265 , snake_case_=768 , snake_case_=12 , snake_case_=12 , snake_case_=3072 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=2 , snake_case_=0.02 , snake_case_=1E-5 , snake_case_=1 , snake_case_=0 , snake_case_=2 , snake_case_=1024 , snake_case_=128 , snake_case_=128 , snake_case_=True , snake_case_=32 , snake_case_=128 , snake_case_=64 , snake_case_=256 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=224 , snake_case_=3 , snake_case_=16 , snake_case_=None , **snake_case_ , ) -> Union[str, Any]:
super().__init__(
vocab_size=snake_case_ , hidden_size=snake_case_ , num_hidden_layers=snake_case_ , num_attention_heads=snake_case_ , intermediate_size=snake_case_ , hidden_act=snake_case_ , hidden_dropout_prob=snake_case_ , attention_probs_dropout_prob=snake_case_ , max_position_embeddings=snake_case_ , type_vocab_size=snake_case_ , initializer_range=snake_case_ , layer_norm_eps=snake_case_ , pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_ , )
UpperCamelCase__ = max_ad_position_embeddings
UpperCamelCase__ = coordinate_size
UpperCamelCase__ = shape_size
UpperCamelCase__ = has_relative_attention_bias
UpperCamelCase__ = rel_pos_bins
UpperCamelCase__ = max_rel_pos
UpperCamelCase__ = has_spatial_attention_bias
UpperCamelCase__ = rel_ad_pos_bins
UpperCamelCase__ = max_rel_ad_pos
UpperCamelCase__ = text_embed
UpperCamelCase__ = visual_embed
UpperCamelCase__ = input_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = patch_size
UpperCamelCase__ = classifier_dropout
class __lowerCamelCase ( _a ):
a : Tuple =version.parse("""1.12""" )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Mapping[str, Mapping[int, str]]:
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
else:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels'}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> float:
return 1E-5
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
return 12
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = -1 , snake_case_ = -1 , snake_case_ = False , snake_case_ = None , snake_case_ = 3 , snake_case_ = 40 , snake_case_ = 40 , ) -> Mapping[str, Any]:
setattr(processor.image_processor , 'apply_ocr' , snake_case_ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCamelCase__ = compute_effective_axis_dimension(
snake_case_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCamelCase__ = processor.tokenizer.num_special_tokens_to_add(snake_case_ )
UpperCamelCase__ = compute_effective_axis_dimension(
snake_case_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=snake_case_ )
# Generate dummy inputs according to compute batch and sequence
UpperCamelCase__ = [[' '.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
UpperCamelCase__ = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
UpperCamelCase__ = self._generate_dummy_images(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
UpperCamelCase__ = dict(
processor(
snake_case_ , text=snake_case_ , boxes=snake_case_ , return_tensors=snake_case_ , ) )
return inputs
| 20 | 1 |
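# Hedged re-implementation sketch of the helper used twice above; the real
# function lives in transformers.onnx.utils, and this mirrors its documented
# behaviour as I understand it: a dynamic axis (<= 0) falls back to a fixed
# size, minus room reserved for special tokens.
def compute_effective_axis_dimension_sketch(dimension: int, fixed_dimension: int,
                                            num_token_to_add: int = 0) -> int:
    if dimension <= 0:  # a dynamic axis was requested
        dimension = fixed_dimension
    return dimension - num_token_to_add

assert compute_effective_axis_dimension_sketch(-1, fixed_dimension=2) == 2
assert compute_effective_axis_dimension_sketch(-1, fixed_dimension=8, num_token_to_add=2) == 6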
"""simple docstring"""
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 20 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester :
def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=2 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=3 , snake_case_=4 , snake_case_=None , ) -> Tuple:
UpperCamelCase__ = parent
UpperCamelCase__ = 13
UpperCamelCase__ = 7
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = 99
UpperCamelCase__ = 384
UpperCamelCase__ = 2
UpperCamelCase__ = 4
UpperCamelCase__ = 37
UpperCamelCase__ = 'gelu'
UpperCamelCase__ = 0.1
UpperCamelCase__ = 0.1
UpperCamelCase__ = 512
UpperCamelCase__ = 16
UpperCamelCase__ = 2
UpperCamelCase__ = 0.02
UpperCamelCase__ = 3
UpperCamelCase__ = 4
UpperCamelCase__ = 128
UpperCamelCase__ = 2
UpperCamelCase__ = 9
UpperCamelCase__ = 1
UpperCamelCase__ = None
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ = None
if self.use_input_mask:
UpperCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ = None
if self.use_token_type_ids:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase__ = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=snake_case_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int:
UpperCamelCase__ = TFConvBertModel(config=snake_case_ )
UpperCamelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCamelCase__ = [input_ids, input_mask]
UpperCamelCase__ = model(snake_case_ )
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int:
UpperCamelCase__ = TFConvBertForMaskedLM(config=snake_case_ )
UpperCamelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[str]:
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = TFConvBertForSequenceClassification(config=snake_case_ )
UpperCamelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Tuple:
UpperCamelCase__ = self.num_choices
UpperCamelCase__ = TFConvBertForMultipleChoice(config=snake_case_ )
UpperCamelCase__ = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[str]:
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = TFConvBertForTokenClassification(config=snake_case_ )
UpperCamelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[Any]:
UpperCamelCase__ = TFConvBertForQuestionAnswering(config=snake_case_ )
UpperCamelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
UpperCamelCase__ = self.prepare_config_and_inputs()
(
(
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) ,
) = config_and_inputs
UpperCamelCase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class __lowerCamelCase ( _a , _a , unittest.TestCase ):
a : Any =(
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
a : str =(
{
"""feature-extraction""": TFConvBertModel,
"""fill-mask""": TFConvBertForMaskedLM,
"""question-answering""": TFConvBertForQuestionAnswering,
"""text-classification""": TFConvBertForSequenceClassification,
"""token-classification""": TFConvBertForTokenClassification,
"""zero-shot""": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
a : Any =False
a : Dict =False
a : str =False
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = TFConvBertModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=snake_case_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = True
UpperCamelCase__ = True
if hasattr(snake_case_ , 'use_cache' ):
UpperCamelCase__ = True
UpperCamelCase__ = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
UpperCamelCase__ = getattr(self.model_tester , 'key_length' , snake_case_ )
for model_class in self.all_model_classes:
UpperCamelCase__ = self._prepare_for_class(snake_case_ , snake_case_ )
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = len(model(snake_case_ ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case_ , saved_model=snake_case_ )
UpperCamelCase__ = os.path.join(snake_case_ , 'saved_model' , '1' )
UpperCamelCase__ = tf.keras.models.load_model(snake_case_ )
UpperCamelCase__ = model(snake_case_ )
if self.is_encoder_decoder:
UpperCamelCase__ = outputs['encoder_hidden_states']
UpperCamelCase__ = outputs['encoder_attentions']
else:
UpperCamelCase__ = outputs['hidden_states']
UpperCamelCase__ = outputs['attentions']
self.assertEqual(len(snake_case_ ) , snake_case_ )
UpperCamelCase__ = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(snake_case_ ) , snake_case_ )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
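    # NOTE: ConvBERT replaces half of the self-attention heads with span-based
    # dynamic convolution (head_ratio defaults to 2), which is why the expected
    # attention shapes above and below use num_attention_heads / 2.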
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = True
UpperCamelCase__ = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
UpperCamelCase__ = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
UpperCamelCase__ = getattr(self.model_tester , 'key_length' , snake_case_ )
UpperCamelCase__ = getattr(self.model_tester , 'key_length' , snake_case_ )
def check_decoder_attentions_output(snake_case_ ):
UpperCamelCase__ = len(snake_case_ )
self.assertEqual(out_len % 2 , 0 )
UpperCamelCase__ = outputs.decoder_attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(snake_case_ ):
UpperCamelCase__ = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
UpperCamelCase__ = True
UpperCamelCase__ = False
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
UpperCamelCase__ = len(snake_case_ )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
if self.is_encoder_decoder:
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_decoder_attentions_output(snake_case_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
UpperCamelCase__ = True
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
# Check attention is always last and order is fine
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(snake_case_ ) )
self.assertEqual(model.config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
@require_tf
class __lowerCamelCase ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
UpperCamelCase__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCamelCase__ = model(snake_case_ )[0]
UpperCamelCase__ = [1, 6, 768]
self.assertEqual(output.shape , snake_case_ )
UpperCamelCase__ = tf.constant(
[
[
[-0.03_475_493, -0.4_686_034, -0.30_638_832],
[0.22_637_248, -0.26_988_646, -0.7_423_424],
[0.10_324_868, -0.45_013_508, -0.58_280_784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , snake_case_ , atol=1E-4 )
| 20 | 1 |
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class __lowerCamelCase ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
debug_launcher(test_script.main )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
debug_launcher(test_ops.main )
| 20 |
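# Hedged usage sketch: debug_launcher runs a function under accelerate's local
# multi-process CPU launcher, which is what the two tests above exercise.
# The num_processes keyword is assumed from accelerate's public API (default 2).
from accelerate import debug_launcher

def train():
    print("hello from one of the launched processes")

if __name__ == "__main__":
    debug_launcher(train, num_processes=2)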
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def solution( t_limit = 1_00_00_00 , n_limit = 10 ) -> int:
    """simple docstring"""
    count = defaultdict(int )
    for outer_width in range(3 , (t_limit // 4) + 2 ):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound , outer_width - 1 , 2 ):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 20 | 1 |
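# Brute-force cross-check sketch for the Project Euler 174 solution above (all
# names here are illustrative). A square lamina with outer side a and hole side b
# (same parity, b >= 1) uses t = a*a - b*b tiles; N(t) counts the laminae that
# use exactly t tiles, and the answer sums over t <= limit with 1 <= N(t) <= 10.
from collections import Counter

def brute_force(t_limit: int = 1000) -> int:
    ways = Counter()
    for a in range(3, t_limit):  # outer side length
        if 4 * a - 4 > t_limit:  # thinnest lamina for this side already too big
            break
        for b in range(a - 2, 0, -2):  # hole side, same parity as the outer side
            t = a * a - b * b
            if t > t_limit:
                break
            ways[t] += 1
    return sum(1 for n in ways.values() if 1 <= n <= 10)

print(brute_force(1000))  # should agree with solution(1000) from the snippet above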
"""simple docstring"""
def catalan_numbers( upper_limit ) -> "list[int]":
    """simple docstring"""
    if upper_limit < 0:
        raise ValueError('Limit for the Catalan sequence must be ≥ 0' )
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2 , upper_limit + 1 ):
        for j in range(i ):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list
if __name__ == "__main__":
print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
print("""\n*** Enter -1 at any time to quit ***""")
print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
try:
while True:
            N = int(input().strip())
if N < 0:
print("""\n********* Goodbye!! ************""")
break
else:
print(F"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print("""Try another upper limit for the sequence: """, end="""""")
except (NameError, ValueError):
print("""\n********* Invalid input, goodbye! ************\n""")
import doctest
doctest.testmod()
| 20 |
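# Cross-check sketch: the DP above can be validated against the closed form
# C(n) = binomial(2n, n) // (n + 1) (standard identity; math.comb needs Python 3.8+).
from math import comb

def catalan_closed_form(n: int) -> int:
    return comb(2 * n, n) // (n + 1)

assert [catalan_closed_form(i) for i in range(10)] == [1, 1, 2, 5, 14, 42, 132, 429, 1430, 4862]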
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester ( unittest.TestCase ):
def __init__( self , snake_case_ , snake_case_=100 , snake_case_=13 , snake_case_=30 , snake_case_=2 , snake_case_=3 , snake_case_=True , snake_case_=True , snake_case_=32 , snake_case_=5 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=10 , snake_case_=0.02 , snake_case_=3 , ) -> Optional[int]:
UpperCamelCase__ = parent
UpperCamelCase__ = vocab_size
UpperCamelCase__ = batch_size
UpperCamelCase__ = image_size
UpperCamelCase__ = patch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = is_training
UpperCamelCase__ = use_labels
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = type_sequence_label_size
UpperCamelCase__ = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase__ = (image_size // patch_size) ** 2
UpperCamelCase__ = num_patches + 1
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ = BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case_ , initializer_range=self.initializer_range , )
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> List[Any]:
UpperCamelCase__ = FlaxBeitModel(config=snake_case_ )
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> List[Any]:
UpperCamelCase__ = FlaxBeitForMaskedImageModeling(config=snake_case_ )
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> List[str]:
UpperCamelCase__ = self.type_sequence_label_size
UpperCamelCase__ = FlaxBeitForImageClassification(config=snake_case_ )
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase__ = 1
UpperCamelCase__ = FlaxBeitForImageClassification(snake_case_ )
UpperCamelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase__ = model(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.prepare_config_and_inputs()
(
(
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) ,
) = config_and_inputs
UpperCamelCase__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class __lowerCamelCase ( _a , unittest.TestCase ):
a : int =(
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
def SCREAMING_SNAKE_CASE__ ( self ) -> None:
UpperCamelCase__ = FlaxBeitModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ = [*signature.parameters.keys()]
UpperCamelCase__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCamelCase__ = self._prepare_for_class(snake_case_ , snake_case_ )
UpperCamelCase__ = model_class(snake_case_ )
@jax.jit
def model_jitted(snake_case_ , **snake_case_ ):
return model(pixel_values=snake_case_ , **snake_case_ )
with self.subTest('JIT Enabled' ):
UpperCamelCase__ = model_jitted(**snake_case_ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
UpperCamelCase__ = model_jitted(**snake_case_ ).to_tuple()
self.assertEqual(len(snake_case_ ) , len(snake_case_ ) )
for jitted_output, output in zip(snake_case_ , snake_case_ ):
self.assertEqual(jitted_output.shape , output.shape )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
for model_class_name in self.all_model_classes:
UpperCamelCase__ = model_class_name.from_pretrained('microsoft/beit-base-patch16-224' )
UpperCamelCase__ = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(snake_case_ )
def lowerCAmelCase_( ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = FlaxBeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=snake_case_ , return_tensors='np' ).pixel_values
# prepare bool_masked_pos
UpperCamelCase__ = np.ones((1, 196) , dtype=snake_case_ )
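        # 196 = (224 // 16) ** 2: one mask entry per 16x16 patch of the 224x224 input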
# forward pass
UpperCamelCase__ = model(pixel_values=snake_case_ , bool_masked_pos=snake_case_ )
UpperCamelCase__ = outputs.logits
# verify the logits
UpperCamelCase__ = (1, 196, 8192)
self.assertEqual(logits.shape , snake_case_ )
UpperCamelCase__ = np.array(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , snake_case_ , atol=1E-2 ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=snake_case_ , return_tensors='np' )
# forward pass
UpperCamelCase__ = model(**snake_case_ )
UpperCamelCase__ = outputs.logits
# verify the logits
UpperCamelCase__ = (1, 1000)
self.assertEqual(logits.shape , snake_case_ )
UpperCamelCase__ = np.array([-1.2_385, -1.0_987, -1.0_108] )
self.assertTrue(np.allclose(logits[0, :3] , snake_case_ , atol=1E-4 ) )
UpperCamelCase__ = 281
self.assertEqual(logits.argmax(-1 ).item() , snake_case_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=snake_case_ , return_tensors='np' )
# forward pass
UpperCamelCase__ = model(**snake_case_ )
UpperCamelCase__ = outputs.logits
# verify the logits
UpperCamelCase__ = (1, 2_1841)
self.assertEqual(logits.shape , snake_case_ )
UpperCamelCase__ = np.array([1.6_881, -0.2_787, 0.5_901] )
self.assertTrue(np.allclose(logits[0, :3] , snake_case_ , atol=1E-4 ) )
UpperCamelCase__ = 2396
self.assertEqual(logits.argmax(-1 ).item() , snake_case_ )
| 20 | 1 |
"""simple docstring"""
import os
import sys
import unittest
A__ : Union[str, Any]= os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
A__ : List[str]= os.path.join(git_repo_path, """src""", """transformers""")
A__ : Dict= """
{0} = None
"""
A__ : str= """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
"""
A__ : List[Any]= """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
class __lowerCamelCase ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
UpperCamelCase__ = find_backend(' _import_structure["models.albert"].append("AlbertTokenizerFast")' )
self.assertIsNone(snake_case_ )
UpperCamelCase__ = find_backend(' if not is_tokenizers_available():' )
self.assertEqual(snake_case_ , 'tokenizers' )
UpperCamelCase__ = find_backend(' if not is_tensorflow_text_available():' )
self.assertEqual(snake_case_ , 'tensorflow_text' )
UpperCamelCase__ = find_backend(' if not (is_sentencepiece_available() and is_tokenizers_available()):' )
self.assertEqual(snake_case_ , 'sentencepiece_and_tokenizers' )
UpperCamelCase__ = find_backend(
' if not (is_sentencepiece_available() and is_tensorflow_text_available()):' )
self.assertEqual(snake_case_ , 'sentencepiece_and_tensorflow_text' )
UpperCamelCase__ = find_backend(
' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):' )
self.assertEqual(snake_case_ , 'sentencepiece_and_tokenizers_and_vision' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
UpperCamelCase__ = read_init()
        # We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
self.assertIn('torch' , snake_case_ )
self.assertIn('tensorflow_text' , snake_case_ )
self.assertIn('sentencepiece_and_tokenizers' , snake_case_ )
# Likewise, we can't assert on the exact content of a key
self.assertIn('BertModel' , objects['torch'] )
self.assertIn('TFBertModel' , objects['tf'] )
self.assertIn('FlaxBertModel' , objects['flax'] )
self.assertIn('BertModel' , objects['torch'] )
self.assertIn('TFBertTokenizer' , objects['tensorflow_text'] )
self.assertIn('convert_slow_tokenizer' , objects['sentencepiece_and_tokenizers'] )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
UpperCamelCase__ = create_dummy_object('CONSTANT' , '\'torch\'' )
self.assertEqual(snake_case_ , '\nCONSTANT = None\n' )
UpperCamelCase__ = create_dummy_object('function' , '\'torch\'' )
self.assertEqual(
snake_case_ , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' )
UpperCamelCase__ = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n'
UpperCamelCase__ = create_dummy_object('FakeClass' , '\'torch\'' )
self.assertEqual(snake_case_ , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n'
UpperCamelCase__ = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} )
self.assertEqual(dummy_files['torch'] , snake_case_ )
| 20 |
"""simple docstring"""
import sys
from collections import defaultdict
class Heap :
    def __init__( self ) -> None:
        self.node_position = []
    def get_position( self , vertex ) -> int:
        return self.node_position[vertex]
    def set_position( self , vertex , pos ) -> None:
        self.node_position[vertex] = pos
    def top_to_bottom( self , heap , start , size , positions ) -> None:
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp , tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child] , positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start] , positions[start] = temp, tempa
                temp = self.get_position(positions[smallest_child] )
                self.set_position(
                    positions[smallest_child] , self.get_position(positions[start] ) )
                self.set_position(positions[start] , temp )
                self.top_to_bottom(heap , smallest_child , size , positions )
    def bottom_to_top( self , val , index , heap , position ) -> None:
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent] , index )
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp , index )
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp , 0 )
    def heapify( self , heap , positions ) -> None:
        start = len(heap ) // 2 - 1
        for i in range(start , -1 , -1 ):
            self.top_to_bottom(heap , i , len(heap ) , positions )
    def delete_minimum( self , heap , positions ) -> int:
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap , 0 , len(heap ) , positions )
        return temp
def prisms_algorithm( adjacency_list ):
    """simple docstring"""
    heap = Heap()
    visited = [0] * len(adjacency_list )
    nbr_tv = [-1] * len(adjacency_list )  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list ) ):
        distance_tv.append(sys.maxsize )
        positions.append(vertex )
        heap.node_position.append(vertex )
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv , positions )
    for _ in range(1 , len(adjacency_list ) ):
        vertex = heap.delete_minimum(distance_tv , positions )
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex) )
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor )]
                ):
                    distance_tv[heap.get_position(neighbor )] = distance
                    heap.bottom_to_top(
                        distance , heap.get_position(neighbor ) , distance_tv , positions )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("""Enter number of edges: """).strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 20 | 1 |
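# Compact reference sketch of Prim's algorithm using heapq with lazy deletion,
# instead of the hand-rolled indexed heap above; it accepts the same
# adjacency-list shape ({vertex: [(neighbor, weight), ...]}). Illustrative only.
import heapq
from collections import defaultdict

def prim_mst_edges(adjacency_list, start=0):
    visited = {start}
    heap = [(weight, start, neighbor) for neighbor, weight in adjacency_list[start]]
    heapq.heapify(heap)
    mst_edges = []
    while heap and len(visited) < len(adjacency_list):
        weight, u, v = heapq.heappop(heap)
        if v in visited:  # stale entry, skip (lazy deletion)
            continue
        visited.add(v)
        mst_edges.append((u, v))
        for nxt, w in adjacency_list[v]:
            if nxt not in visited:
                heapq.heappush(heap, (w, v, nxt))
    return mst_edges

demo_graph = defaultdict(list)
for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 4)]:
    demo_graph[u].append((v, w))
    demo_graph[v].append((u, w))
print(prim_mst_edges(demo_graph))  # [(0, 1), (1, 2)]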
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
A__ : int= get_tests_dir("""fixtures/test_sentencepiece_bpe_char.model""")
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( _a , unittest.TestCase ):
a : List[str] =SpeechTaTokenizer
a : List[Any] =False
a : List[str] =True
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCamelCase__ = SpeechTaTokenizer(snake_case_ )
UpperCamelCase__ = AddedToken('<mask>' , lstrip=snake_case_ , rstrip=snake_case_ )
UpperCamelCase__ = mask_token
tokenizer.add_special_tokens({'mask_token': mask_token} )
tokenizer.add_tokens(['<ctc_blank>'] )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Optional[Any]:
UpperCamelCase__ = 'this is a test'
UpperCamelCase__ = 'this is a test'
return input_text, output_text
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_=False , snake_case_=20 , snake_case_=5 ) -> Optional[Any]:
UpperCamelCase__ , UpperCamelCase__ = self.get_input_output_texts(snake_case_ )
UpperCamelCase__ = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
UpperCamelCase__ = tokenizer.decode(snake_case_ , clean_up_tokenization_spaces=snake_case_ )
return text, ids
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
UpperCamelCase__ = '<pad>'
UpperCamelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-4] , 'œ' )
self.assertEqual(vocab_keys[-2] , '<mask>' )
self.assertEqual(vocab_keys[-1] , '<ctc_blank>' )
self.assertEqual(len(snake_case_ ) , 81 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = self.get_tokenizers(do_lower_case=snake_case_ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCamelCase__ = tokenizer.vocab_size
UpperCamelCase__ = len(snake_case_ )
self.assertNotEqual(snake_case_ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
UpperCamelCase__ = ['aaaaa bbbbbb', 'cccccccccdddddddd']
UpperCamelCase__ = tokenizer.add_tokens(snake_case_ )
UpperCamelCase__ = tokenizer.vocab_size
UpperCamelCase__ = len(snake_case_ )
self.assertNotEqual(snake_case_ , 0 )
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , len(snake_case_ ) )
self.assertEqual(snake_case_ , all_size + len(snake_case_ ) )
UpperCamelCase__ = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=snake_case_ )
self.assertGreaterEqual(len(snake_case_ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
UpperCamelCase__ = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
UpperCamelCase__ = tokenizer.add_special_tokens(snake_case_ )
UpperCamelCase__ = tokenizer.vocab_size
UpperCamelCase__ = len(snake_case_ )
self.assertNotEqual(snake_case_ , 0 )
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , len(snake_case_ ) )
self.assertEqual(snake_case_ , all_size_a + len(snake_case_ ) )
UpperCamelCase__ = tokenizer.encode(
'>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=snake_case_ )
self.assertGreaterEqual(len(snake_case_ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
pass
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
pass
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = tokenizer.tokenize('This is a test' )
# fmt: off
self.assertListEqual(snake_case_ , [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case_ ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
UpperCamelCase__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
snake_case_ , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] )
UpperCamelCase__ = tokenizer.convert_tokens_to_ids(snake_case_ )
# fmt: off
self.assertListEqual(snake_case_ , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
UpperCamelCase__ = tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertListEqual(
snake_case_ , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
# Use custom sequence because this tokenizer does not handle numbers.
UpperCamelCase__ = [
'Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '
'general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '
'Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '
'models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.',
'BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '
'conditioning on both left and right context in all layers.',
'The quick brown fox jumps over the lazy dog.',
]
# fmt: off
UpperCamelCase__ = {
'input_ids': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name='microsoft/speecht5_asr' , revision='c5ef64c71905caeccde0e4462ef3f9077224c524' , sequences=snake_case_ , )
| 20 |
"""simple docstring"""
from copy import deepcopy
class FenwickTree :
    def __init__( self , arr = None , size = None ) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr )
        else:
            raise ValueError('Either arr or size must be specified' )
    def init( self , arr ) -> None:
        self.size = len(arr )
        self.tree = deepcopy(arr )
        for i in range(1 , self.size ):
            j = self.next_(i )
            if j < self.size:
                self.tree[j] += self.tree[i]
    def get_array( self ) -> list[int]:
        arr = self.tree[:]
        for i in range(self.size - 1 , 0 , -1 ):
            j = self.next_(i )
            if j < self.size:
                arr[j] -= arr[i]
        return arr
    @staticmethod
    def next_( index ) -> int:
        return index + (index & (-index))
    @staticmethod
    def prev( index ) -> int:
        return index - (index & (-index))
    def add( self , index , value ) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index )
    def update( self , index , value ) -> None:
        self.add(index , value - self.get(index ) )
    def prefix( self , right ) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right )
        return result
    def query( self , left , right ) -> int:
        return self.prefix(right ) - self.prefix(left )
    def get( self , index ) -> int:
        return self.query(index , index + 1 )
    def rank_query( self , value ) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
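# A hedged usage sketch of the binary indexed (Fenwick) tree above. The renamed
# methods all collide on one identifier, so the names below (add/prefix/query/get,
# as in the source this sample was derived from) are assumptions:
#   tree = BinaryIndexedTree([1, 2, 3, 4, 5])
#   tree.prefix(3)     # 1 + 2 + 3 == 6, sum of the first three elements
#   tree.query(1, 4)   # 2 + 3 + 4 == 9, half-open range [1, 4)
#   tree.add(2, 10)    # the represented array becomes [1, 2, 13, 4, 5]
#   tree.get(2)        # 13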
| 20 | 1 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A__ : List[str]= logging.get_logger()
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = True ) -> Optional[Any]:
"""simple docstring"""
print(F'Converting {name}...' )
with torch.no_grad():
if hidden_sizes == 1_28:
if name[-1] == "S":
UpperCamelCase__ = timm.create_model('levit_128s' , pretrained=SCREAMING_SNAKE_CASE )
else:
UpperCamelCase__ = timm.create_model('levit_128' , pretrained=SCREAMING_SNAKE_CASE )
if hidden_sizes == 1_92:
UpperCamelCase__ = timm.create_model('levit_192' , pretrained=SCREAMING_SNAKE_CASE )
if hidden_sizes == 2_56:
UpperCamelCase__ = timm.create_model('levit_256' , pretrained=SCREAMING_SNAKE_CASE )
if hidden_sizes == 3_84:
UpperCamelCase__ = timm.create_model('levit_384' , pretrained=SCREAMING_SNAKE_CASE )
from_model.eval()
UpperCamelCase__ = LevitForImageClassificationWithTeacher(SCREAMING_SNAKE_CASE ).eval()
UpperCamelCase__ = OrderedDict()
UpperCamelCase__ = from_model.state_dict()
UpperCamelCase__ = list(from_model.state_dict().keys() )
UpperCamelCase__ = list(our_model.state_dict().keys() )
print(len(SCREAMING_SNAKE_CASE ) , len(SCREAMING_SNAKE_CASE ) )
for i in range(len(SCREAMING_SNAKE_CASE ) ):
UpperCamelCase__ = weights[og_keys[i]]
our_model.load_state_dict(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = torch.randn((2, 3, 2_24, 2_24) )
UpperCamelCase__ = from_model(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = our_model(SCREAMING_SNAKE_CASE ).logits
assert torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ), "The model logits don't match the original one."
UpperCamelCase__ = name
print(SCREAMING_SNAKE_CASE )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
UpperCamelCase__ = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(F'Pushed {checkpoint_name}' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = True ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ = 'imagenet-1k-id2label.json'
UpperCamelCase__ = 10_00
UpperCamelCase__ = (1, num_labels)
UpperCamelCase__ = 'huggingface/label-files'
UpperCamelCase__ = num_labels
UpperCamelCase__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
UpperCamelCase__ = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
UpperCamelCase__ = idalabel
UpperCamelCase__ = {v: k for k, v in idalabel.items()}
UpperCamelCase__ = partial(SCREAMING_SNAKE_CASE , num_labels=SCREAMING_SNAKE_CASE , idalabel=SCREAMING_SNAKE_CASE , labelaid=SCREAMING_SNAKE_CASE )
UpperCamelCase__ = {
'levit-128S': 1_28,
'levit-128': 1_28,
'levit-192': 1_92,
'levit-256': 2_56,
'levit-384': 3_84,
}
UpperCamelCase__ = {
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , SCREAMING_SNAKE_CASE , names_to_config[model_name] , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return config, expected_shape
if __name__ == "__main__":
A__ : List[Any]= argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""levit-dump-folder/""",
type=Path,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
A__ : Any= parser.parse_args()
A__ : Path= args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
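# Example invocation (script file name is illustrative; the argument names match
# the parser above):
#   python convert_levit_checkpoint.py --model_name levit-128S \
#       --pytorch_dump_folder_path levit-dump-folder/ --push_to_hub
# Omitting --model_name converts every entry in names_to_config.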
| 20 |
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
A__ : Union[str, Any]= logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""" )
@require_torch
@require_tf
@slow
class __lowerCamelCase ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = True , ) -> Tuple:
UpperCamelCase__ = [file for file in os.listdir(snake_case_ ) if os.path.isfile(os.path.join(snake_case_ , snake_case_ ) )]
if identifier is not None:
UpperCamelCase__ = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(snake_case_ , snake_case_ ):
for n_ in n_identifier:
UpperCamelCase__ = [file for file in files if n_ not in file]
else:
UpperCamelCase__ = [file for file in files if n_identifier not in file]
UpperCamelCase__ = ignore_files or []
ignore_files.append('__init__.py' )
UpperCamelCase__ = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('Testing' , snake_case_ )
if only_modules:
UpperCamelCase__ = file.split('.' )[0]
try:
UpperCamelCase__ = getattr(snake_case_ , snake_case_ )
UpperCamelCase__ = doctest.DocTestSuite(snake_case_ )
UpperCamelCase__ = unittest.TextTestRunner().run(snake_case_ )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(F'{module_identifier} is not a module.' )
else:
UpperCamelCase__ = doctest.testfile(str('..' / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = Path('src/transformers' )
UpperCamelCase__ = 'modeling'
UpperCamelCase__ = [
'modeling_ctrl.py',
'modeling_tf_ctrl.py',
]
self.analyze_directory(snake_case_ , identifier=snake_case_ , ignore_files=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = Path('src/transformers' )
UpperCamelCase__ = 'tokenization'
self.analyze_directory(snake_case_ , identifier=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
UpperCamelCase__ = Path('src/transformers' )
UpperCamelCase__ = 'configuration'
self.analyze_directory(snake_case_ , identifier=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = Path('src/transformers' )
UpperCamelCase__ = ['configuration', 'modeling', 'tokenization']
self.analyze_directory(snake_case_ , n_identifier=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = Path('docs/source' )
UpperCamelCase__ = ['favicon.ico']
self.analyze_directory(snake_case_ , ignore_files=snake_case_ , only_modules=snake_case_ )
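# The module branch of analyze_directory uses the standard doctest/unittest
# bridge; in isolation the pattern is (module name illustrative):
#   import doctest, unittest
#   suite = doctest.DocTestSuite(some_module)       # collect >>> examples from docstrings
#   result = unittest.TextTestRunner().run(suite)   # run them as unit tests
#   assert len(result.failures) == 0
# Non-module files go through doctest.testfile(...) with ELLIPSIS enabled instead.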
| 20 | 1 |
"""simple docstring"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE = 2_00_00_00 ) -> int:
"""simple docstring"""
UpperCamelCase__ = [0 for i in range(n + 1 )]
UpperCamelCase__ = 1
UpperCamelCase__ = 1
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i , n + 1 , SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = 1
UpperCamelCase__ = 0
for i in range(SCREAMING_SNAKE_CASE ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(F"""{solution() = }""")
| 20 |
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ : str= logging.get_logger(__name__)
A__ : List[Any]= {
"""nvidia/segformer-b0-finetuned-ade-512-512""": (
"""https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"""
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class __lowerCamelCase ( _a ):
a : Any ="""segformer"""
def __init__( self , snake_case_=3 , snake_case_=4 , snake_case_=[2, 2, 2, 2] , snake_case_=[8, 4, 2, 1] , snake_case_=[32, 64, 160, 256] , snake_case_=[7, 3, 3, 3] , snake_case_=[4, 2, 2, 2] , snake_case_=[1, 2, 5, 8] , snake_case_=[4, 4, 4, 4] , snake_case_="gelu" , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.1 , snake_case_=0.02 , snake_case_=0.1 , snake_case_=1E-6 , snake_case_=256 , snake_case_=255 , **snake_case_ , ) -> Tuple:
super().__init__(**snake_case_ )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
' removed, as the behaviour will default to that of reshape_last_stage = True.' , snake_case_ , )
UpperCamelCase__ = num_channels
UpperCamelCase__ = num_encoder_blocks
UpperCamelCase__ = depths
UpperCamelCase__ = sr_ratios
UpperCamelCase__ = hidden_sizes
UpperCamelCase__ = patch_sizes
UpperCamelCase__ = strides
UpperCamelCase__ = mlp_ratios
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = classifier_dropout_prob
UpperCamelCase__ = initializer_range
UpperCamelCase__ = drop_path_rate
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = decoder_hidden_size
UpperCamelCase__ = kwargs.get('reshape_last_stage' , snake_case_ )
UpperCamelCase__ = semantic_loss_ignore_index
class __lowerCamelCase ( _a ):
a : Any =version.parse("""1.11""" )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> float:
return 1E-4
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
return 12
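# Minimal usage sketch, assuming the original class names SegformerConfig and
# SegformerOnnxConfig (both classes above were renamed to the same identifier):
#   config = SegformerConfig(num_encoder_blocks=4, depths=[2, 2, 2, 2])
#   onnx_config = SegformerOnnxConfig(config)
#   onnx_config.inputs               # OrderedDict with dynamic axes for pixel_values
#   onnx_config.atol_for_validation  # 1e-4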
| 20 | 1 |
"""simple docstring"""
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class __lowerCamelCase :
a : str =None
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
UpperCamelCase__ = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase__ = os.path.join(snake_case_ , 'feat_extract.json' )
feat_extract_first.to_json_file(snake_case_ )
UpperCamelCase__ = self.feature_extraction_class.from_json_file(snake_case_ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase__ = feat_extract_first.save_pretrained(snake_case_ )[0]
check_json_file_has_correct_format(snake_case_ )
UpperCamelCase__ = self.feature_extraction_class.from_pretrained(snake_case_ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = self.feature_extraction_class()
self.assertIsNotNone(snake_case_ )
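# The JSON round trip these tests exercise, reduced to its core (plain dict
# standing in for a real feature extractor config):
#   import json, os, tempfile
#   cfg = {'feature_size': 80, 'sampling_rate': 16000}
#   path = os.path.join(tempfile.mkdtemp(), 'feat_extract.json')
#   with open(path, 'w') as f:
#       json.dump(cfg, f)
#   with open(path) as f:
#       assert json.load(f) == cfg  # from_json_file must reproduce to_dict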
| 20 |
"""simple docstring"""
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class __lowerCamelCase ( _a ):
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Union[str, Any]:
UpperCamelCase__ = parser.add_parser('download' )
download_parser.add_argument(
'--cache-dir' , type=snake_case_ , default=snake_case_ , help='Path to location to store the models' )
download_parser.add_argument(
'--force' , action='store_true' , help='Force the model to be downloaded even if already in cache-dir' )
download_parser.add_argument(
'--trust-remote-code' , action='store_true' , help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' , )
download_parser.add_argument('model' , type=snake_case_ , help='Name of the model to download' )
download_parser.set_defaults(func=snake_case_ )
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> str:
UpperCamelCase__ = model
UpperCamelCase__ = cache
UpperCamelCase__ = force
UpperCamelCase__ = trust_remote_code
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
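# Hedged usage sketch: once registered on the transformers CLI, the command is
# invoked as (model name illustrative):
#   transformers-cli download bert-base-uncased --cache-dir ./models --force
# which runs AutoModel.from_pretrained and AutoTokenizer.from_pretrained with
# force_download=True into the given cache directory.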
| 20 | 1 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class __lowerCamelCase :
def __init__( self , snake_case_ , snake_case_=2 , snake_case_=True , snake_case_=False , snake_case_=10 , snake_case_=3 , snake_case_=32 * 4 , snake_case_=32 * 6 , snake_case_=4 , snake_case_=32 , ) -> Any:
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = is_training
UpperCamelCase__ = use_auxiliary_loss
UpperCamelCase__ = num_queries
UpperCamelCase__ = num_channels
UpperCamelCase__ = min_size
UpperCamelCase__ = max_size
UpperCamelCase__ = num_labels
UpperCamelCase__ = mask_feature_size
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
UpperCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
snake_case_ )
UpperCamelCase__ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=snake_case_ )
UpperCamelCase__ = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=snake_case_ ) > 0.5
).float()
UpperCamelCase__ = (torch.rand((self.batch_size, self.num_labels) , device=snake_case_ ) > 0.5).long()
UpperCamelCase__ = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.prepare_config_and_inputs()
UpperCamelCase__ = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> int:
UpperCamelCase__ = output.encoder_hidden_states
UpperCamelCase__ = output.pixel_decoder_hidden_states
UpperCamelCase__ = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(snake_case_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(snake_case_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(snake_case_ ) , config.decoder_config.decoder_layers )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=False ) -> Optional[Any]:
with torch.no_grad():
UpperCamelCase__ = MaskFormerModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(pixel_values=snake_case_ , pixel_mask=snake_case_ )
UpperCamelCase__ = model(snake_case_ , output_hidden_states=snake_case_ )
# the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(snake_case_ , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> str:
UpperCamelCase__ = MaskFormerForInstanceSegmentation(config=snake_case_ )
model.to(snake_case_ )
model.eval()
def comm_check_on_output(snake_case_ ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
UpperCamelCase__ = model(pixel_values=snake_case_ , pixel_mask=snake_case_ )
UpperCamelCase__ = model(snake_case_ )
comm_check_on_output(snake_case_ )
UpperCamelCase__ = model(
pixel_values=snake_case_ , pixel_mask=snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ )
comm_check_on_output(snake_case_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __lowerCamelCase ( _a , _a , unittest.TestCase ):
a : Union[str, Any] =(MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
a : List[Any] =(
{"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
a : Union[str, Any] =False
a : Optional[int] =False
a : int =False
a : List[Any] =False
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = MaskFormerModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(snake_case_ , **snake_case_ , output_hidden_states=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*snake_case_ )
@unittest.skip(reason='MaskFormer does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
pass
@unittest.skip(reason='MaskFormer does not have a get_input_embeddings method' )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
pass
@unittest.skip(reason='MaskFormer is not a generative model' )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
pass
@unittest.skip(reason='MaskFormer does not use token embeddings' )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
pass
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ = [*signature.parameters.keys()]
UpperCamelCase__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
for model_name in ["facebook/maskformer-swin-small-coco"]:
UpperCamelCase__ = MaskFormerModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = (self.model_tester.min_size,) * 2
UpperCamelCase__ = {
'pixel_values': torch.randn((2, 3, *size) , device=snake_case_ ),
'mask_labels': torch.randn((2, 10, *size) , device=snake_case_ ),
'class_labels': torch.zeros(2 , 10 , device=snake_case_ ).long(),
}
UpperCamelCase__ = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(snake_case_ )
UpperCamelCase__ = model(**snake_case_ )
self.assertTrue(outputs.loss is not None )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(snake_case_ , **snake_case_ , output_hidden_states=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(snake_case_ ).to(snake_case_ )
UpperCamelCase__ = model(**snake_case_ , output_attentions=snake_case_ )
self.assertTrue(outputs.attentions is not None )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
UpperCamelCase__ = self.all_model_classes[1]
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
UpperCamelCase__ = model_class(snake_case_ )
model.to(snake_case_ )
model.train()
UpperCamelCase__ = model(snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ ).loss
loss.backward()
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
# only MaskFormerForInstanceSegmentation has the loss
UpperCamelCase__ = self.all_model_classes[1]
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = model_class(snake_case_ )
model.to(snake_case_ )
model.train()
UpperCamelCase__ = model(snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ )
UpperCamelCase__ = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
UpperCamelCase__ = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
UpperCamelCase__ = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
UpperCamelCase__ = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=snake_case_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
A__ : Any= 1E-4
def lowerCAmelCase_( ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class __lowerCamelCase ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
return (
MaskFormerImageProcessor.from_pretrained('facebook/maskformer-swin-small-coco' )
if is_vision_available()
else None
)
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
UpperCamelCase__ = MaskFormerModel.from_pretrained('facebook/maskformer-swin-small-coco' ).to(snake_case_ )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(snake_case_ , return_tensors='pt' ).to(snake_case_ )
UpperCamelCase__ = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(snake_case_ , (1, 3, 800, 1088) )
with torch.no_grad():
UpperCamelCase__ = model(**snake_case_ )
UpperCamelCase__ = torch.tensor(
[[-0.0_482, 0.9_228, 0.4_951], [-0.2_547, 0.8_017, 0.8_527], [-0.0_069, 0.3_385, -0.0_089]] ).to(snake_case_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )
UpperCamelCase__ = torch.tensor(
[[-0.8_422, -0.8_434, -0.9_718], [-1.0_144, -0.5_565, -0.4_195], [-1.0_038, -0.4_484, -0.1_961]] ).to(snake_case_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )
UpperCamelCase__ = torch.tensor(
[[0.2_852, -0.0_159, 0.9_735], [0.6_254, 0.1_858, 0.8_529], [-0.0_680, -0.4_116, 1.8_413]] ).to(snake_case_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' )
.to(snake_case_ )
.eval()
)
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(snake_case_ , return_tensors='pt' ).to(snake_case_ )
UpperCamelCase__ = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(snake_case_ , (1, 3, 800, 1088) )
with torch.no_grad():
UpperCamelCase__ = model(**snake_case_ )
# masks_queries_logits
UpperCamelCase__ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
UpperCamelCase__ = [
[-1.3_737_124, -1.7_724_937, -1.9_364_233],
[-1.5_977_281, -1.9_867_939, -2.1_523_695],
[-1.5_795_398, -1.9_269_832, -2.093_942],
]
UpperCamelCase__ = torch.tensor(snake_case_ ).to(snake_case_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )
# class_queries_logits
UpperCamelCase__ = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
UpperCamelCase__ = torch.tensor(
[
[1.6512E00, -5.2572E00, -3.3519E00],
[3.6169E-02, -5.9025E00, -2.9313E00],
[1.0766E-04, -7.7630E00, -5.1263E00],
] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
UpperCamelCase__ = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-resnet101-coco-stuff' )
.to(snake_case_ )
.eval()
)
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(snake_case_ , return_tensors='pt' ).to(snake_case_ )
UpperCamelCase__ = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(snake_case_ , (1, 3, 800, 1088) )
with torch.no_grad():
UpperCamelCase__ = model(**snake_case_ )
# masks_queries_logits
UpperCamelCase__ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
UpperCamelCase__ = [[-0.9_046, -2.6_366, -4.6_062], [-3.4_179, -5.7_890, -8.8_057], [-4.9_179, -7.6_560, -10.7_711]]
UpperCamelCase__ = torch.tensor(snake_case_ ).to(snake_case_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )
# class_queries_logits
UpperCamelCase__ = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
UpperCamelCase__ = torch.tensor(
[[4.7_188, -3.2_585, -2.8_857], [6.6_871, -2.9_181, -1.2_487], [7.2_449, -2.2_764, -2.1_874]] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
UpperCamelCase__ = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' )
.to(snake_case_ )
.eval()
)
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='pt' , )
UpperCamelCase__ = inputs['pixel_values'].to(snake_case_ )
UpperCamelCase__ = [el.to(snake_case_ ) for el in inputs['mask_labels']]
UpperCamelCase__ = [el.to(snake_case_ ) for el in inputs['class_labels']]
with torch.no_grad():
UpperCamelCase__ = model(**snake_case_ )
self.assertTrue(outputs.loss is not None )
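# The @slow integration tests above are skipped by default; a hedged invocation
# (test path and -k filter are illustrative):
#   RUN_SLOW=1 python -m pytest tests/models/maskformer/test_modeling_maskformer.py -k "inference"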
| 20 |
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCamelCase ( _a ):
def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=5 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=False , snake_case_=True , snake_case_="None" , snake_case_=3 , snake_case_=4 , snake_case_=None , ) -> str:
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = seq_length
UpperCamelCase__ = is_training
UpperCamelCase__ = use_input_mask
UpperCamelCase__ = use_token_type_ids
UpperCamelCase__ = use_labels
UpperCamelCase__ = vocab_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = type_vocab_size
UpperCamelCase__ = type_sequence_label_size
UpperCamelCase__ = initializer_range
UpperCamelCase__ = num_labels
UpperCamelCase__ = num_choices
UpperCamelCase__ = relative_attention
UpperCamelCase__ = position_biased_input
UpperCamelCase__ = pos_att_type
UpperCamelCase__ = scope
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ = None
if self.use_input_mask:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
UpperCamelCase__ = None
if self.use_token_type_ids:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Any:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> str:
UpperCamelCase__ = DebertaVaModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ )[0]
UpperCamelCase__ = model(snake_case_ , token_type_ids=snake_case_ )[0]
UpperCamelCase__ = model(snake_case_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Optional[Any]:
UpperCamelCase__ = DebertaVaForMaskedLM(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Dict:
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = DebertaVaForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Optional[int]:
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = DebertaVaForTokenClassification(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Tuple:
UpperCamelCase__ = DebertaVaForQuestionAnswering(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int:
UpperCamelCase__ = DebertaVaForMultipleChoice(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = config_and_inputs
UpperCamelCase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( _a , _a , unittest.TestCase ):
a : Any =(
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
a : Dict =(
{
"""feature-extraction""": DebertaVaModel,
"""fill-mask""": DebertaVaForMaskedLM,
"""question-answering""": DebertaVaForQuestionAnswering,
"""text-classification""": DebertaVaForSequenceClassification,
"""token-classification""": DebertaVaForTokenClassification,
"""zero-shot""": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
a : Tuple =True
a : Union[str, Any] =False
a : Tuple =False
a : Union[str, Any] =False
a : Dict =False
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
UpperCamelCase__ = DebertaVaModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=snake_case_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*snake_case_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ = DebertaVaModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( unittest.TestCase ):
@unittest.skip(reason='Model not available yet' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
pass
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
UpperCamelCase__ = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge' )
UpperCamelCase__ = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
UpperCamelCase__ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ )[0]
# compare the actual values for a slice.
UpperCamelCase__ = torch.tensor(
[[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , snake_case_ , atol=1E-4 ) , F'{output[:, 1:4, 1:4]}' )
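# As with the other model suites, the @slow head-model check above only runs
# when slow tests are enabled, e.g. (path illustrative):
#   RUN_SLOW=1 python -m pytest tests/models/deberta_v2/test_modeling_deberta_v2.py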
| 20 | 1 |
"""simple docstring"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> bool:
"""simple docstring"""
if num < 0:
return False
UpperCamelCase__ = num
UpperCamelCase__ = 0
while num > 0:
UpperCamelCase__ = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
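# Hedged usage sketch of the digit-reversal palindrome check above:
#   lowerCAmelCase_(121)   # True: 121 reversed is 121
#   lowerCAmelCase_(-121)  # False: negatives are rejected up front
#   lowerCAmelCase_(10)    # False: 10 reversed is 1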
| 20 |
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ = SwinConfig()
UpperCamelCase__ = swin_name.split('_' )
UpperCamelCase__ = name_split[1]
UpperCamelCase__ = int(name_split[4] )
UpperCamelCase__ = int(name_split[3][-1] )
if model_size == "tiny":
UpperCamelCase__ = 96
UpperCamelCase__ = (2, 2, 6, 2)
UpperCamelCase__ = (3, 6, 12, 24)
elif model_size == "small":
UpperCamelCase__ = 96
UpperCamelCase__ = (2, 2, 18, 2)
UpperCamelCase__ = (3, 6, 12, 24)
elif model_size == "base":
UpperCamelCase__ = 1_28
UpperCamelCase__ = (2, 2, 18, 2)
UpperCamelCase__ = (4, 8, 16, 32)
else:
UpperCamelCase__ = 1_92
UpperCamelCase__ = (2, 2, 18, 2)
UpperCamelCase__ = (6, 12, 24, 48)
if "in22k" in swin_name:
UpperCamelCase__ = 2_18_41
else:
UpperCamelCase__ = 10_00
UpperCamelCase__ = 'huggingface/label-files'
UpperCamelCase__ = 'imagenet-1k-id2label.json'
UpperCamelCase__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
UpperCamelCase__ = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
UpperCamelCase__ = idalabel
UpperCamelCase__ = {v: k for k, v in idalabel.items()}
UpperCamelCase__ = img_size
UpperCamelCase__ = num_classes
UpperCamelCase__ = embed_dim
UpperCamelCase__ = depths
UpperCamelCase__ = num_heads
UpperCamelCase__ = window_size
return config
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
if "patch_embed.proj" in name:
UpperCamelCase__ = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
UpperCamelCase__ = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
UpperCamelCase__ = 'encoder.' + name
if "attn.proj" in name:
UpperCamelCase__ = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
UpperCamelCase__ = name.replace('attn' , 'attention.self' )
if "norm1" in name:
UpperCamelCase__ = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
UpperCamelCase__ = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
UpperCamelCase__ = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
UpperCamelCase__ = name.replace('mlp.fc2' , 'output.dense' )
if name == "norm.weight":
UpperCamelCase__ = 'layernorm.weight'
if name == "norm.bias":
UpperCamelCase__ = 'layernorm.bias'
if "head" in name:
UpperCamelCase__ = name.replace('head' , 'classifier' )
else:
UpperCamelCase__ = 'swin.' + name
return name
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
UpperCamelCase__ = orig_state_dict.pop(SCREAMING_SNAKE_CASE )
if "mask" in key:
continue
elif "qkv" in key:
UpperCamelCase__ = key.split('.' )
UpperCamelCase__ = int(key_split[1] )
UpperCamelCase__ = int(key_split[3] )
UpperCamelCase__ = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
UpperCamelCase__ = val[:dim, :]
UpperCamelCase__ = val[
dim : dim * 2, :
]
UpperCamelCase__ = val[-dim:, :]
else:
UpperCamelCase__ = val[
:dim
]
UpperCamelCase__ = val[
dim : dim * 2
]
UpperCamelCase__ = val[
-dim:
]
else:
UpperCamelCase__ = val
return orig_state_dict
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
UpperCamelCase__ = timm.create_model(SCREAMING_SNAKE_CASE , pretrained=SCREAMING_SNAKE_CASE )
timm_model.eval()
UpperCamelCase__ = get_swin_config(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = SwinForImageClassification(SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase__ = convert_state_dict(timm_model.state_dict() , SCREAMING_SNAKE_CASE )
model.load_state_dict(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
UpperCamelCase__ = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-' ) ) )
UpperCamelCase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
UpperCamelCase__ = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='pt' )
UpperCamelCase__ = timm_model(inputs['pixel_values'] )
UpperCamelCase__ = model(**SCREAMING_SNAKE_CASE ).logits
assert torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-3 )
print(F'Saving model {swin_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
A__ : Optional[Any]= argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
A__ : Tuple= parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
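# Example invocation (script file name is illustrative; --swin_name must be a
# valid timm checkpoint such as the default):
#   python convert_swin_timm_to_pytorch.py --swin_name swin_tiny_patch4_window7_224 \
#       --pytorch_dump_folder_path ./swin-tiny-patch4-window7-224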
| 20 | 1 |
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
A__ : Optional[int]= logging.getLogger(__name__)
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False , ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ = bnb_quantization_config.load_in_abit
UpperCamelCase__ = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'
' make sure you have the latest version of `bitsandbytes` installed.' )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'
'make sure you have the latest version of `bitsandbytes` installed.' )
UpperCamelCase__ = []
# custom device map
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and len(device_map.keys() ) > 1:
UpperCamelCase__ = [key for key, value in device_map.items() if value in ['disk', 'cpu']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
UpperCamelCase__ = get_keys_to_not_convert(SCREAMING_SNAKE_CASE )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
UpperCamelCase__ = []
UpperCamelCase__ = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(SCREAMING_SNAKE_CASE )
# compatibility with peft
UpperCamelCase__ = load_in_abit
UpperCamelCase__ = load_in_abit
UpperCamelCase__ = get_parameter_device(SCREAMING_SNAKE_CASE )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'It is not recommended to quantize a loaded model. '
'The model should be instantiated under the `init_empty_weights` context manager.' )
UpperCamelCase__ = replace_with_bnb_layers(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , modules_to_not_convert=SCREAMING_SNAKE_CASE )
# convert param to the right dtype
UpperCamelCase__ = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
UpperCamelCase__ = name.replace('.weight' , '' ).replace('.bias' , '' )
UpperCamelCase__ = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(SCREAMING_SNAKE_CASE ):
param.to(SCREAMING_SNAKE_CASE )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
logger.info(
F'The model device type is {model_device.type}. However, cuda is needed for quantization.'
' We move the model to cuda.' )
return model
elif weights_location is None:
raise RuntimeError(
F'`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ' )
else:
with init_empty_weights():
UpperCamelCase__ = replace_with_bnb_layers(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , modules_to_not_convert=SCREAMING_SNAKE_CASE )
UpperCamelCase__ = get_quantized_model_device_map(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , max_memory=SCREAMING_SNAKE_CASE , no_split_module_classes=SCREAMING_SNAKE_CASE , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
UpperCamelCase__ = True
UpperCamelCase__ = any(x in list(device_map.values() ) for x in ['cpu', 'disk'] )
load_checkpoint_in_model(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , dtype=bnb_quantization_config.torch_dtype , offload_folder=SCREAMING_SNAKE_CASE , offload_state_dict=SCREAMING_SNAKE_CASE , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(SCREAMING_SNAKE_CASE , device_map=SCREAMING_SNAKE_CASE , offload_dir=SCREAMING_SNAKE_CASE )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None ) -> List[str]:
"""simple docstring"""
if device_map is None:
if torch.cuda.is_available():
UpperCamelCase__ = {'': torch.cuda.current_device()}
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
logger.info('The device_map was not initialized. ' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.' )
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '
'\'sequential\'.' )
UpperCamelCase__ = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
UpperCamelCase__ = {}
UpperCamelCase__ = special_dtypes
UpperCamelCase__ = no_split_module_classes
UpperCamelCase__ = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
UpperCamelCase__ = get_balanced_memory(
SCREAMING_SNAKE_CASE , low_zero=(device_map == 'balanced_low_0') , max_memory=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
UpperCamelCase__ = max_memory
UpperCamelCase__ = infer_auto_device_map(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
# check that we don't have any quantized modules on the cpu or the disk
UpperCamelCase__ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
UpperCamelCase__ = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
'\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ' )
else:
logger.info(
'Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit' )
del device_map_without_some_modules
return device_map
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None ) -> List[Any]:
"""simple docstring"""
if modules_to_not_convert is None:
UpperCamelCase__ = []
UpperCamelCase__ , UpperCamelCase__ = _replace_with_bnb_layers(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
            ' This can happen for some architectures, such as gpt2, that use Conv1D instead of Linear layers.'
            ' Please double-check your model architecture, or submit an issue on GitHub if you think this is'
' a bug.' )
return model
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ = False
for name, module in model.named_children():
if current_key_name is None:
UpperCamelCase__ = []
current_key_name.append(SCREAMING_SNAKE_CASE )
if isinstance(SCREAMING_SNAKE_CASE , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
UpperCamelCase__ = '.'.join(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
UpperCamelCase__ = False
break
if proceed:
                # Load the bnb module with empty weights and replace the `nn.Linear` module
if bnb_quantization_config.load_in_abit:
UpperCamelCase__ = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=SCREAMING_SNAKE_CASE , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
UpperCamelCase__ = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
                    raise ValueError('load_in_8bit and load_in_4bit can\'t both be False' )
UpperCamelCase__ = module.weight.data
if module.bias is not None:
UpperCamelCase__ = module.bias.data
bnb_module.requires_grad_(SCREAMING_SNAKE_CASE )
setattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase__ = True
if len(list(module.children() ) ) > 0:
UpperCamelCase__ , UpperCamelCase__ = _replace_with_bnb_layers(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase__ = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
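# Hedged note (editor's addition): ``LinearabitLt`` / ``Linearabit`` above are
# this file's mangled spellings of the real ``bnb.nn.Linear8bitLt`` and
# ``bnb.nn.Linear4bit`` classes; swapping a single ``nn.Linear`` by hand mirrors
# the 8-bit branch:
def _editor_example_swap_linear(module):
    import bitsandbytes as bnb

    bnb_module = bnb.nn.Linear8bitLt(
        module.in_features,
        module.out_features,
        module.bias is not None,
        has_fp16_weights=False,
        threshold=6.0,  # illustrative outlier threshold
    )
    bnb_module.weight.data = module.weight.data  # quantized on the first move to CUDA
    if module.bias is not None:
        bnb_module.bias.data = module.bias.data
    return bnb_module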
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
with init_empty_weights():
        UpperCamelCase__ = deepcopy(SCREAMING_SNAKE_CASE ) # this has 0 cost since it is done inside the `init_empty_weights` context manager
UpperCamelCase__ = find_tied_parameters(SCREAMING_SNAKE_CASE )
# For compatibility with Accelerate < 0.18
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
UpperCamelCase__ = sum(SCREAMING_SNAKE_CASE , [] )
UpperCamelCase__ = len(SCREAMING_SNAKE_CASE ) > 0
# Check if it is a base model
UpperCamelCase__ = False
if hasattr(SCREAMING_SNAKE_CASE , 'base_model_prefix' ):
UpperCamelCase__ = not hasattr(SCREAMING_SNAKE_CASE , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
UpperCamelCase__ = list(model.named_children() )
UpperCamelCase__ = [list_modules[-1][0]]
# add last module together with tied weights
UpperCamelCase__ = set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = list(set(SCREAMING_SNAKE_CASE ) ) + list(SCREAMING_SNAKE_CASE )
# remove ".weight" from the keys
UpperCamelCase__ = ['.weight', '.bias']
UpperCamelCase__ = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
UpperCamelCase__ = name.replace(SCREAMING_SNAKE_CASE , '' )
filtered_module_names.append(SCREAMING_SNAKE_CASE )
return filtered_module_names
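# Hedged sketch (editor's addition): ``find_tied_parameters`` is the real
# accelerate utility used above; a toy tied-embedding model shows the kind of
# groups it reports, which is why the output head lands in the skip list.
def _editor_example_tied_parameters():
    import torch.nn as nn
    from accelerate.utils import find_tied_parameters

    class TinyLM(nn.Module):
        def __init__(self):
            super().__init__()
            self.embed = nn.Embedding(10, 4)
            self.head = nn.Linear(4, 10, bias=False)
            self.head.weight = self.embed.weight  # tie head to embedding

        def forward(self, x):
            return self.head(self.embed(x))

    return find_tied_parameters(TinyLM())  # e.g. [['embed.weight', 'head.weight']]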
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
for m in model.modules():
if isinstance(SCREAMING_SNAKE_CASE , bnb.nn.Linearabit ):
return True
return False
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
return next(parameter.parameters() ).device
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
if fpaa_statistics is None:
set_module_tensor_to_device(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , 0 , dtype=SCREAMING_SNAKE_CASE , value=SCREAMING_SNAKE_CASE )
UpperCamelCase__ = param_name
UpperCamelCase__ = model
if "." in tensor_name:
UpperCamelCase__ = tensor_name.split('.' )
for split in splits[:-1]:
UpperCamelCase__ = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if new_module is None:
raise ValueError(F'{module} has no attribute {split}.' )
UpperCamelCase__ = new_module
UpperCamelCase__ = splits[-1]
# offload weights
UpperCamelCase__ = False
offload_weight(module._parameters[tensor_name] , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , index=SCREAMING_SNAKE_CASE )
if hasattr(module._parameters[tensor_name] , 'SCB' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('weight' , 'SCB' ) , SCREAMING_SNAKE_CASE , index=SCREAMING_SNAKE_CASE , )
else:
offload_weight(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , index=SCREAMING_SNAKE_CASE )
offload_weight(SCREAMING_SNAKE_CASE , param_name.replace('weight' , 'SCB' ) , SCREAMING_SNAKE_CASE , index=SCREAMING_SNAKE_CASE )
set_module_tensor_to_device(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , 'meta' , dtype=SCREAMING_SNAKE_CASE , value=torch.empty(*param.size() ) )
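# Hedged note (editor's addition): after the offload above the module holds a
# meta placeholder; ``set_module_tensor_to_device`` (a real accelerate helper)
# is how a concrete tensor is put back, mirroring the calls in this function.
def _editor_example_restore_param(model, param_name, tensor):
    from accelerate.utils import set_module_tensor_to_device

    set_module_tensor_to_device(model, param_name, 0, value=tensor)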
| 20 |
"""simple docstring"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> bool:
"""simple docstring"""
UpperCamelCase__ = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def lowerCAmelCase_( SCREAMING_SNAKE_CASE = 50_00 ) -> int:
"""simple docstring"""
UpperCamelCase__ = [(i * (3 * i - 1)) // 2 for i in range(1 , SCREAMING_SNAKE_CASE )]
for i, pentagonal_i in enumerate(SCREAMING_SNAKE_CASE ):
for j in range(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) ):
UpperCamelCase__ = pentagonal_nums[j]
UpperCamelCase__ = pentagonal_i + pentagonal_j
UpperCamelCase__ = pentagonal_j - pentagonal_i
if is_pentagonal(SCREAMING_SNAKE_CASE ) and is_pentagonal(SCREAMING_SNAKE_CASE ):
return b
return -1
if __name__ == "__main__":
print(F"""{solution() = }""")
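# Hedged example (editor's addition): a readable, runnable restatement of the
# pentagonal test above; ``is_pentagonal`` here is a local re-derivation, not a
# name exported by this sample.
def _editor_example_pentagonal() -> None:
    def is_pentagonal(n: int) -> bool:
        root = (1 + 24 * n) ** 0.5  # invert n = k * (3k - 1) / 2
        return ((1 + root) / 6) % 1 == 0  # pentagonal iff k is a positive integer

    assert is_pentagonal(22)  # P_4 = 22
    assert not is_pentagonal(23)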
| 20 | 1 |
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class __lowerCamelCase :
def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=False , snake_case_=True , snake_case_=False , snake_case_=False , snake_case_=19 , snake_case_=32 , snake_case_=5 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=3 , snake_case_=4 , snake_case_=None , ) -> Optional[int]:
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = seq_length
UpperCamelCase__ = is_training
UpperCamelCase__ = use_input_mask
UpperCamelCase__ = use_token_type_ids
UpperCamelCase__ = use_labels
UpperCamelCase__ = vocab_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = type_vocab_size
UpperCamelCase__ = type_sequence_label_size
UpperCamelCase__ = initializer_range
UpperCamelCase__ = num_labels
UpperCamelCase__ = num_choices
UpperCamelCase__ = scope
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ = None
if self.use_input_mask:
UpperCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase__ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = EsmConfig(
vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=snake_case_ , esmfold_config={'trunk': {'num_blocks': 2}, 'fp16_esm': False} , )
return config
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Tuple:
UpperCamelCase__ = EsmForProteinFolding(config=snake_case_ ).float()
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ )
UpperCamelCase__ = model(snake_case_ )
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) )
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = self.prepare_config_and_inputs()
        UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = config_and_inputs
UpperCamelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( _a , _a , unittest.TestCase ):
a : Tuple =False
a : List[str] =(EsmForProteinFolding,) if is_torch_available() else ()
a : Optional[Any] =()
a : List[str] ={} if is_torch_available() else {}
a : Optional[Any] =False
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = EsmFoldModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=snake_case_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
@unittest.skip('Does not support attention outputs' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
pass
@unittest.skip
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
pass
@unittest.skip('Esm does not support embedding resizing' )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
pass
@unittest.skip('Esm does not support embedding resizing' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
pass
@unittest.skip('ESMFold does not support passing input embeds!' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
pass
@unittest.skip('ESMFold does not output hidden states in the normal way.' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
pass
    @unittest.skip('ESMFold does not output hidden states in the normal way.' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
pass
@unittest.skip('ESMFold only has one output format.' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
pass
@unittest.skip('This test doesn\'t work for ESMFold and doesn\'t test core functionality' )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
pass
@unittest.skip('ESMFold does not support input chunking.' )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
pass
@unittest.skip('ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
pass
@unittest.skip('ESMFold doesn\'t support data parallel.' )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
pass
@require_torch
class __lowerCamelCase ( _a ):
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = EsmForProteinFolding.from_pretrained('facebook/esmfold_v1' ).float()
model.eval()
UpperCamelCase__ = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
UpperCamelCase__ = model(snake_case_ )['positions']
UpperCamelCase__ = torch.tensor([2.5_828, 0.7_993, -10.9_334] , dtype=torch.floataa )
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , snake_case_ , atol=1E-4 ) )
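# Hedged usage sketch (editor's addition): outside the test harness, the
# integration case above corresponds to roughly this inference flow; the model
# and tokenizer names are real, the protein sequence is an arbitrary example.
def _editor_example_esmfold_inference():
    from transformers import AutoTokenizer, EsmForProteinFolding

    tokenizer = AutoTokenizer.from_pretrained('facebook/esmfold_v1')
    model = EsmForProteinFolding.from_pretrained('facebook/esmfold_v1')
    inputs = tokenizer(['MKTAYIAKQR'], return_tensors='pt', add_special_tokens=False)
    outputs = model(**inputs)
    return outputs['positions']  # (8, batch, seq_len, 14, 3), as asserted above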
| 20 |
"""simple docstring"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE = 50_00_00_00 ) -> int:
"""simple docstring"""
UpperCamelCase__ = set()
UpperCamelCase__ = int((limit - 24) ** (1 / 2) )
UpperCamelCase__ = set(range(3 , prime_square_limit + 1 , 2 ) )
primes.add(2 )
for p in range(3 , prime_square_limit + 1 , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , prime_square_limit + 1 , SCREAMING_SNAKE_CASE ) ) )
    UpperCamelCase__ = sorted(primes )  # ascending order keeps the early ``break``s below sound
    for primea in sorted_primes:
        UpperCamelCase__ = primea * primea
        for primea in sorted_primes:
            UpperCamelCase__ = primea * primea * primea
            if square + cube >= limit - 16:
                break
            for primea in sorted_primes:
UpperCamelCase__ = primea * primea * primea * primea
UpperCamelCase__ = square + cube + tetr
if total >= limit:
break
ret.add(SCREAMING_SNAKE_CASE )
return len(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
print(F"""{solution() = }""")
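# Hedged cross-check (editor's addition): a brute-force count for a small limit;
# Project Euler 87 states the four expressible numbers below fifty are 28, 33,
# 47 and 49, so this should return 4 for limit=50.
def _editor_example_brute_force(limit: int = 50) -> int:
    expressible = set()
    primes = [p for p in range(2, int(limit ** 0.5) + 1) if all(p % d for d in range(2, p))]
    for p in primes:
        for q in primes:
            for r in primes:
                total = p ** 2 + q ** 3 + r ** 4
                if total < limit:
                    expressible.add(total)
    return len(expressible)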
| 20 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
A__ : Tuple= logging.get_logger(__name__)
@dataclass
class __lowerCamelCase ( _a ):
a : Optional[Any] =[
"""no_inference""",
"""no_cuda""",
"""no_tpu""",
"""no_speed""",
"""no_memory""",
"""no_env_print""",
"""no_multi_process""",
]
def __init__( self , **snake_case_ ) -> str:
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
                UpperCamelCase__ = deprecated_arg[3:]
                UpperCamelCase__ = not kwargs.pop(snake_case_ )  # read before discarding; the old key is gone afterwards
                setattr(self , snake_case_ , snake_case_ )
                logger.warning(
                    F'{deprecated_arg} is deprecated. Please use --no_{positive_arg} or'
                    F' {positive_arg}={positive_arg_value}' )
UpperCamelCase__ = kwargs.pop('torchscript' , self.torchscript )
UpperCamelCase__ = kwargs.pop('torch_xla_tpu_print_metrics' , self.torch_xla_tpu_print_metrics )
UpperCamelCase__ = kwargs.pop('fp16_opt_level' , self.fpaa_opt_level )
super().__init__(**snake_case_ )
a : bool =field(default=_a , metadata={"""help""": """Trace the models using torchscript"""} )
a : bool =field(default=_a , metadata={"""help""": """Print Xla/PyTorch tpu metrics"""} )
a : str =field(
default="""O1""" , metadata={
"""help""": (
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. """
"""See details at https://nvidia.github.io/apex/amp.html"""
)
} , )
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple["torch.device", int]:
requires_backends(self , ['torch'] )
logger.info('PyTorch: setting up devices' )
if not self.cuda:
UpperCamelCase__ = torch.device('cpu' )
UpperCamelCase__ = 0
elif is_torch_tpu_available():
UpperCamelCase__ = xm.xla_device()
UpperCamelCase__ = 0
else:
UpperCamelCase__ = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
UpperCamelCase__ = torch.cuda.device_count()
return device, n_gpu
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
return is_torch_tpu_available() and self.tpu
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
requires_backends(self , ['torch'] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> "torch.device":
requires_backends(self , ['torch'] )
return self._setup_devices[0]
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
requires_backends(self , ['torch'] )
return self._setup_devices[1]
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
return self.n_gpu > 0
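# Hedged usage sketch (editor's addition): these fields back the public
# ``PyTorchBenchmarkArguments`` class (its name is mangled above); a typical
# run, with illustrative model and sizes:
def _editor_example_benchmark():
    from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

    args = PyTorchBenchmarkArguments(
        models=['bert-base-uncased'],
        batch_sizes=[8],
        sequence_lengths=[128],
    )
    return PyTorchBenchmark(args).run()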
| 20 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
A__ : List[Any]= ["""bert-base-uncased""", """bert-base-cased"""]
A__ : Optional[int]= """hf-internal-testing/tiny-bert-tf-only"""
if is_tf_available():
class __lowerCamelCase ( tf.keras.Model ):
def __init__( self , snake_case_ ) -> Optional[int]:
super().__init__()
UpperCamelCase__ = tokenizer
UpperCamelCase__ = AutoConfig.from_pretrained(snake_case_ )
UpperCamelCase__ = TFAutoModel.from_config(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> int:
UpperCamelCase__ = self.tokenizer(snake_case_ )
UpperCamelCase__ = self.bert(**snake_case_ )
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class __lowerCamelCase ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
super().setUp()
UpperCamelCase__ = [
BertTokenizer.from_pretrained(snake_case_ ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ] # repeated to also cover fast_bert_tokenizer=False
UpperCamelCase__ = [TFBertTokenizer.from_pretrained(snake_case_ ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(snake_case_ , use_fast_bert_tokenizer=snake_case_ )
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
UpperCamelCase__ = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
UpperCamelCase__ = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in (self.test_sentences, self.paired_sentences):
UpperCamelCase__ = tokenizer(snake_case_ , return_tensors='tf' , padding='longest' )
UpperCamelCase__ = tf_tokenizer(snake_case_ )
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
for tf_tokenizer in self.tf_tokenizers:
UpperCamelCase__ = tf_tokenizer(self.paired_sentences )
UpperCamelCase__ = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
for tf_tokenizer in self.tf_tokenizers:
UpperCamelCase__ = tf.function(snake_case_ )
for test_inputs in (self.test_sentences, self.paired_sentences):
UpperCamelCase__ = tf.constant(snake_case_ )
UpperCamelCase__ = compiled_tokenizer(snake_case_ )
UpperCamelCase__ = tf_tokenizer(snake_case_ )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
for tf_tokenizer in self.tf_tokenizers:
UpperCamelCase__ = ModelToSave(tokenizer=snake_case_ )
UpperCamelCase__ = tf.convert_to_tensor(self.test_sentences )
UpperCamelCase__ = model(snake_case_ ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
UpperCamelCase__ = Path(snake_case_ ) / 'saved.model'
model.save(snake_case_ )
UpperCamelCase__ = tf.keras.models.load_model(snake_case_ )
UpperCamelCase__ = loaded_model(snake_case_ )
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1E-5 )
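# Hedged usage sketch (editor's addition): ``TFBertTokenizer`` is the real
# in-graph tokenizer exercised above; minimal standalone use looks like this,
# with the checkpoint name as an illustrative choice.
def _editor_example_tf_tokenizer():
    import tensorflow as tf
    from transformers import TFBertTokenizer

    tf_tokenizer = TFBertTokenizer.from_pretrained('bert-base-uncased')
    batch = tf_tokenizer(tf.constant(['This is a test sentence.']))
    return batch['input_ids']  # dense tf.Tensor, usable inside tf.function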
| 20 | 1 |
"""simple docstring"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
if not numbers:
return 0
if not isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ) or not all(
isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for number in numbers ):
raise ValueError('numbers must be an iterable of integers' )
UpperCamelCase__ = UpperCamelCase__ = UpperCamelCase__ = numbers[0]
for i in range(1 , len(SCREAMING_SNAKE_CASE ) ):
# update the maximum and minimum subarray products
UpperCamelCase__ = numbers[i]
if number < 0:
UpperCamelCase__ , UpperCamelCase__ = min_till_now, max_till_now
UpperCamelCase__ = max(SCREAMING_SNAKE_CASE , max_till_now * number )
UpperCamelCase__ = min(SCREAMING_SNAKE_CASE , min_till_now * number )
# update the maximum product found till now
UpperCamelCase__ = max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return max_prod
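# Hedged example (editor's addition): a runnable restatement of the
# running-min/max trick above; a negative number swaps the extremes, which is
# what the tuple swap in the loop implements.
def _editor_example_max_product() -> None:
    def max_product_subarray(numbers):
        max_now = min_now = best = numbers[0]
        for number in numbers[1:]:
            if number < 0:
                max_now, min_now = min_now, max_now
            max_now = max(number, max_now * number)
            min_now = min(number, min_now * number)
            best = max(best, max_now)
        return best

    assert max_product_subarray([2, 3, -2, 4]) == 6
    assert max_product_subarray([-2, 0, -1]) == 0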
| 20 |
"""simple docstring"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> list[int]:
"""simple docstring"""
UpperCamelCase__ = len(SCREAMING_SNAKE_CASE )
for i in range(SCREAMING_SNAKE_CASE ):
for j in range(i + 1 , SCREAMING_SNAKE_CASE ):
if numbers[j] < numbers[i]:
UpperCamelCase__ , UpperCamelCase__ = numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
A__ : Union[str, Any]= input("""Enter numbers separated by a comma:\n""").strip()
A__ : List[Any]= [int(item) for item in user_input.split(""",""")]
print(exchange_sort(unsorted))
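# Quick check (editor's addition): exchange sort compares each element with
# every later one and swaps out-of-order pairs, O(n^2) comparisons in total;
# ``exchange_sort`` below is a local restatement of the function above.
def _editor_example_exchange_sort() -> None:
    def exchange_sort(numbers):
        n = len(numbers)
        for i in range(n):
            for j in range(i + 1, n):
                if numbers[j] < numbers[i]:
                    numbers[i], numbers[j] = numbers[j], numbers[i]
        return numbers

    assert exchange_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert exchange_sort([-1, 0, 5, 2]) == [-1, 0, 2, 5]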
| 20 | 1 |