"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCamelCase :
def __init__( self , snake_case_ , snake_case_=12 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=32 , snake_case_=2 , snake_case_=4 , snake_case_=37 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=0.02 , snake_case_=0 , snake_case_=None , ) -> Dict:
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = seq_length
UpperCamelCase__ = is_training
UpperCamelCase__ = use_input_mask
UpperCamelCase__ = use_labels
UpperCamelCase__ = vocab_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = projection_dim
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = dropout
UpperCamelCase__ = attention_dropout
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = initializer_range
UpperCamelCase__ = scope
UpperCamelCase__ = bos_token_id
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ = None
if self.use_input_mask:
UpperCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
UpperCamelCase__ = input_mask.numpy()
UpperCamelCase__ , UpperCamelCase__ = input_mask.shape
UpperCamelCase__ = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(snake_case_ ):
UpperCamelCase__ = 1
UpperCamelCase__ = 0
UpperCamelCase__ = self.get_config()
return config, input_ids, tf.convert_to_tensor(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> Tuple:
UpperCamelCase__ = TFBlipTextModel(config=snake_case_ )
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ , training=snake_case_ )
UpperCamelCase__ = model(snake_case_ , training=snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = config_and_inputs
UpperCamelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class __lowerCamelCase ( _a , unittest.TestCase ):
a : List[str] =(TFBlipTextModel,) if is_tf_available() else ()
a : int =False
a : Any =False
a : Optional[Any] =False
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = BlipTextModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=snake_case_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
pass
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
pass
@unittest.skip(reason='Blip does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
pass
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ = TFBlipTextModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_=True ) -> Tuple:
super().test_pt_tf_model_equivalence(allow_missing_keys=snake_case_ )
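
A minimal sketch of driving the tester by hand, assuming TensorFlow is available; in the repository these checks normally run through TFModelTesterMixin and pytest:

if is_tf_available():

    class BlipTextSmokeTest(unittest.TestCase):
        def test_shapes_by_hand(self):
            tester = BlipTextModelTester(self)
            config, input_ids, input_mask = tester.prepare_config_and_inputs()
            tester.create_and_check_model(config, input_ids, input_mask)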
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
A__ : List[Any]= ["""bert-base-uncased""", """bert-base-cased"""]
A__ : Optional[int]= """hf-internal-testing/tiny-bert-tf-only"""
if is_tf_available():
class __lowerCamelCase ( tf.keras.Model ):
def __init__( self , snake_case_ ) -> Optional[int]:
super().__init__()
UpperCamelCase__ = tokenizer
UpperCamelCase__ = AutoConfig.from_pretrained(snake_case_ )
UpperCamelCase__ = TFAutoModel.from_config(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> int:
UpperCamelCase__ = self.tokenizer(snake_case_ )
UpperCamelCase__ = self.bert(**snake_case_ )
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class __lowerCamelCase ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
super().setUp()
UpperCamelCase__ = [
BertTokenizer.from_pretrained(snake_case_ ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
UpperCamelCase__ = [TFBertTokenizer.from_pretrained(snake_case_ ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(snake_case_ , use_fast_bert_tokenizer=snake_case_ )
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
UpperCamelCase__ = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
UpperCamelCase__ = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in (self.test_sentences, self.paired_sentences):
UpperCamelCase__ = tokenizer(snake_case_ , return_tensors='tf' , padding='longest' )
UpperCamelCase__ = tf_tokenizer(snake_case_ )
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
for tf_tokenizer in self.tf_tokenizers:
UpperCamelCase__ = tf_tokenizer(self.paired_sentences )
UpperCamelCase__ = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
for tf_tokenizer in self.tf_tokenizers:
UpperCamelCase__ = tf.function(snake_case_ )
for test_inputs in (self.test_sentences, self.paired_sentences):
UpperCamelCase__ = tf.constant(snake_case_ )
UpperCamelCase__ = compiled_tokenizer(snake_case_ )
UpperCamelCase__ = tf_tokenizer(snake_case_ )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
for tf_tokenizer in self.tf_tokenizers:
UpperCamelCase__ = ModelToSave(tokenizer=snake_case_ )
UpperCamelCase__ = tf.convert_to_tensor(self.test_sentences )
UpperCamelCase__ = model(snake_case_ ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
UpperCamelCase__ = Path(snake_case_ ) / 'saved.model'
model.save(snake_case_ )
UpperCamelCase__ = tf.keras.models.load_model(snake_case_ )
UpperCamelCase__ = loaded_model(snake_case_ )
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1E-5 )
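
A short usage sketch, assuming tensorflow_text is installed: because TFBertTokenizer tokenizes inside the TF graph, a string tensor goes straight in with no separate Python preprocessing step:

tf_tokenizer = TFBertTokenizer.from_pretrained("bert-base-uncased")
encoded = tf_tokenizer(tf.constant(["This is a straightforward English test sentence."]))
print(encoded["input_ids"])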
"""simple docstring"""
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
A__ : str= {
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
A__ : str= {
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
UpperCamelCase__ = (images / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase__ = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
UpperCamelCase__ = numpy_to_pil(SCREAMING_SNAKE_CASE )
return images
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
if images.ndim == 3:
UpperCamelCase__ = images[None, ...]
UpperCamelCase__ = (images * 2_55).round().astype('uint8' )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
UpperCamelCase__ = [Image.fromarray(image.squeeze() , mode='L' ) for image in images]
else:
UpperCamelCase__ = [Image.fromarray(SCREAMING_SNAKE_CASE ) for image in images]
return pil_images
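
A quick usage sketch, assuming numpy is installed: numpy_to_pil always returns a list of images, and single-channel arrays come back in mode "L":

import numpy as np

rgb = np.random.rand(64, 64, 3)      # one image, values in [0, 1]
gray = np.random.rand(2, 64, 64, 1)  # batch of two grayscale images

assert len(numpy_to_pil(rgb)) == 1
assert numpy_to_pil(gray)[0].mode == "L"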
"""simple docstring"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> list[int]:
"""simple docstring"""
UpperCamelCase__ = len(SCREAMING_SNAKE_CASE )
for i in range(SCREAMING_SNAKE_CASE ):
for j in range(i + 1 , SCREAMING_SNAKE_CASE ):
if numbers[j] < numbers[i]:
UpperCamelCase__ , UpperCamelCase__ = numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
A__ : Union[str, Any]= input("""Enter numbers separated by a comma:\n""").strip()
A__ : List[Any]= [int(item) for item in user_input.split(""",""")]
print(exchange_sort(unsorted))
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __lowerCamelCase ( _a , unittest.TestCase ):
a : Dict =KandinskyVaaControlnetPipeline
a : List[str] =["""image_embeds""", """negative_image_embeds""", """hint"""]
a : str =["""image_embeds""", """negative_image_embeds""", """hint"""]
a : Any =[
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
a : Optional[Any] =False
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
return 32
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
return 32
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
return self.time_input_dim
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
return self.time_input_dim * 4
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
return 100
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
torch.manual_seed(0 )
UpperCamelCase__ = {
'in_channels': 8,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
UpperCamelCase__ = UNetaDConditionModel(**snake_case_ )
return model
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
torch.manual_seed(0 )
UpperCamelCase__ = VQModel(**self.dummy_movq_kwargs )
return model
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
UpperCamelCase__ = self.dummy_unet
UpperCamelCase__ = self.dummy_movq
UpperCamelCase__ = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.00_085 , beta_end=0.012 , clip_sample=snake_case_ , set_alpha_to_one=snake_case_ , steps_offset=1 , prediction_type='epsilon' , thresholding=snake_case_ , )
UpperCamelCase__ = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_=0 ) -> int:
UpperCamelCase__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(snake_case_ ) ).to(snake_case_ )
UpperCamelCase__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
snake_case_ )
# create hint
UpperCamelCase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case_ ) ).to(snake_case_ )
if str(snake_case_ ).startswith('mps' ):
UpperCamelCase__ = torch.manual_seed(snake_case_ )
else:
UpperCamelCase__ = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
UpperCamelCase__ = {
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 64,
'width': 64,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = 'cpu'
UpperCamelCase__ = self.get_dummy_components()
UpperCamelCase__ = self.pipeline_class(**snake_case_ )
UpperCamelCase__ = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase__ = pipe(**self.get_dummy_inputs(snake_case_ ) )
UpperCamelCase__ = output.images
UpperCamelCase__ = pipe(
**self.get_dummy_inputs(snake_case_ ) , return_dict=snake_case_ , )[0]
UpperCamelCase__ = image[0, -3:, -3:, -1]
UpperCamelCase__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase__ = np.array(
[0.6_959_826, 0.868_279, 0.7_558_092, 0.68_769_467, 0.85_805_804, 0.65_977_496, 0.44_885_302, 0.5_959_111, 0.4_251_595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
UpperCamelCase__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy' )
UpperCamelCase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png' )
UpperCamelCase__ = torch.from_numpy(np.array(snake_case_ ) ).float() / 255.0
UpperCamelCase__ = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
UpperCamelCase__ = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(snake_case_ )
UpperCamelCase__ = KandinskyVaaControlnetPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-controlnet-depth' , torch_dtype=torch.floataa )
UpperCamelCase__ = pipeline.to(snake_case_ )
pipeline.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase__ = 'A robot, 4k photo'
UpperCamelCase__ = torch.Generator(device='cuda' ).manual_seed(0 )
UpperCamelCase__ , UpperCamelCase__ = pipe_prior(
snake_case_ , generator=snake_case_ , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
UpperCamelCase__ = torch.Generator(device='cuda' ).manual_seed(0 )
UpperCamelCase__ = pipeline(
image_embeds=snake_case_ , negative_image_embeds=snake_case_ , hint=snake_case_ , generator=snake_case_ , num_inference_steps=100 , output_type='np' , )
UpperCamelCase__ = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(snake_case_ , snake_case_ )
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A__ : Union[str, Any]= {"""configuration_timm_backbone""": ["""TimmBackboneConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Union[str, Any]= ["""TimmBackbone"""]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
A__ : Optional[Any]= _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
A__ : Dict= logging.get_logger(__name__)
A__ : str= {"""vocab_file""": """spiece.model"""}
A__ : Union[str, Any]= {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
}
}
# TODO(PVP) - this should be removed in Transformers v5
A__ : Union[str, Any]= {
"""t5-small""": 5_12,
"""t5-base""": 5_12,
"""t5-large""": 5_12,
"""t5-3b""": 5_12,
"""t5-11b""": 5_12,
}
A__ : Optional[Any]= """▁"""
class __lowerCamelCase ( _a ):
a : Dict =VOCAB_FILES_NAMES
a : str =PRETRAINED_VOCAB_FILES_MAP
a : Union[str, Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : List[str] =["""input_ids""", """attention_mask"""]
def __init__( self , snake_case_ , snake_case_="</s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_=100 , snake_case_=None , snake_case_ = None , snake_case_=True , **snake_case_ , ) -> None:
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
UpperCamelCase__ = [F'<extra_id_{i}>' for i in range(snake_case_ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
UpperCamelCase__ = len(set(filter(lambda snake_case_ : bool('extra_id' in str(snake_case_ ) ) , snake_case_ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
' tokens' )
if legacy:
logger.warning_once(
F'You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'
' read the related pull request available at https://github.com/huggingface/transformers/pull/24565' )
UpperCamelCase__ = legacy
UpperCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , extra_ids=snake_case_ , additional_special_tokens=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , legacy=snake_case_ , **snake_case_ , )
UpperCamelCase__ = vocab_file
UpperCamelCase__ = extra_ids
UpperCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case_ )
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case_ , snake_case_ , snake_case_ ) -> Optional[Any]:
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
UpperCamelCase__ = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'This tokenizer was incorrectly instantiated with a model max length of'
F' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
F' {pretrained_model_name_or_path} automatically truncating your input to'
F' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
F' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
' instantiate this tokenizer with `model_max_length` set to your preferred value.' , snake_case_ , )
return max_model_length
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
return self.sp_model.get_piece_size() + self._extra_ids
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = {self.convert_ids_to_tokens(snake_case_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None , snake_case_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(snake_case_ )) + [1]
return ([0] * len(snake_case_ )) + [1] + ([0] * len(snake_case_ )) + [1]
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
return list(
set(filter(lambda snake_case_ : bool(re.search(r'<extra_id_\d+>' , snake_case_ ) ) is not None , self.additional_special_tokens ) ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
return [self._convert_token_to_id(snake_case_ ) for token in self.get_sentinel_tokens()]
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> List[int]:
if len(snake_case_ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
' eos tokens being added.' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None ) -> List[int]:
UpperCamelCase__ = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None ) -> List[int]:
UpperCamelCase__ = self._add_eos_if_not_present(snake_case_ )
if token_ids_a is None:
return token_ids_a
else:
UpperCamelCase__ = self._add_eos_if_not_present(snake_case_ )
return token_ids_a + token_ids_a
def __getstate__( self ) -> str:
UpperCamelCase__ = self.__dict__.copy()
UpperCamelCase__ = None
return state
def __setstate__( self , snake_case_ ) -> Any:
UpperCamelCase__ = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
UpperCamelCase__ = {}
UpperCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , **snake_case_ ) -> List[str]:
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
UpperCamelCase__ = SPIECE_UNDERLINE + text.replace(snake_case_ , ' ' )
return super().tokenize(snake_case_ , **snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , **snake_case_ ) -> List[Any]:
if not self.legacy:
UpperCamelCase__ = text.startswith(snake_case_ )
if is_first:
UpperCamelCase__ = text[1:]
UpperCamelCase__ = self.sp_model.encode(snake_case_ , out_type=snake_case_ )
if not self.legacy and not is_first and not text.startswith(' ' ) and tokens[0].startswith(snake_case_ ):
UpperCamelCase__ = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> List[str]:
if token.startswith('<extra_id_' ):
UpperCamelCase__ = re.match(r'<extra_id_(\d+)>' , snake_case_ )
UpperCamelCase__ = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Optional[int]:
if index < self.sp_model.get_piece_size():
UpperCamelCase__ = self.sp_model.IdToPiece(snake_case_ )
else:
UpperCamelCase__ = F'<extra_id_{self.vocab_size - 1 - index}>'
return token
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> List[str]:
UpperCamelCase__ = []
UpperCamelCase__ = ''
UpperCamelCase__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(snake_case_ ) + token
UpperCamelCase__ = True
UpperCamelCase__ = []
else:
current_sub_tokens.append(snake_case_ )
UpperCamelCase__ = False
out_string += self.sp_model.decode(snake_case_ )
return out_string.strip()
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]:
if not os.path.isdir(snake_case_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
UpperCamelCase__ = os.path.join(
snake_case_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case_ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case_ , 'wb' ) as fi:
UpperCamelCase__ = self.sp_model.serialized_model_proto()
fi.write(snake_case_ )
return (out_vocab_file,)
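
A short usage sketch, assuming network access to the hub checkpoint: build_inputs_with_special_tokens appends the </s> eos token, so the last id of an encoded sequence is always eos_token_id:

tokenizer = T5Tokenizer.from_pretrained("t5-small")
ids = tokenizer("Studies have shown that owning a dog is good for you").input_ids
assert ids[-1] == tokenizer.eos_token_id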
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class __lowerCamelCase ( unittest.TestCase ):
def __init__( self , snake_case_ , snake_case_=7 , snake_case_=3 , snake_case_=18 , snake_case_=30 , snake_case_=400 , snake_case_=True , snake_case_=None , snake_case_=True , ) -> Any:
UpperCamelCase__ = size if size is not None else {'height': 18, 'width': 18}
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = image_size
UpperCamelCase__ = min_resolution
UpperCamelCase__ = max_resolution
UpperCamelCase__ = do_resize
UpperCamelCase__ = size
UpperCamelCase__ = do_normalize
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_866_443_634_033_203, 0.6_618_829_369_544_983, 0.3_891_746_401_786_804],
[-0.6_042_559_146_881_104, -0.02_295_008_860_528_469, 0.5_423_797_369_003_296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class __lowerCamelCase ( _a , unittest.TestCase ):
a : Any =ImageGPTImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = ImageGPTImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case_ , 'clusters' ) )
self.assertTrue(hasattr(snake_case_ , 'do_resize' ) )
self.assertTrue(hasattr(snake_case_ , 'size' ) )
self.assertTrue(hasattr(snake_case_ , 'do_normalize' ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
UpperCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
UpperCamelCase__ = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(snake_case_ , obj[key] ) )
else:
self.assertEqual(obj[key] , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase__ = os.path.join(snake_case_ , 'image_processor.json' )
image_processor_first.to_json_file(snake_case_ )
UpperCamelCase__ = self.image_processing_class.from_json_file(snake_case_ ).to_dict()
UpperCamelCase__ = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(snake_case_ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(snake_case_ )
UpperCamelCase__ = self.image_processing_class.from_pretrained(snake_case_ ).to_dict()
UpperCamelCase__ = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(snake_case_ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , snake_case_ )
@unittest.skip('ImageGPT requires clusters at initialization' )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
pass
def lowerCAmelCase_( ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ = load_dataset('hf-internal-testing/fixtures_image_utils' , split='test' )
UpperCamelCase__ = Image.open(dataset[4]['file'] )
UpperCamelCase__ = Image.open(dataset[5]['file'] )
UpperCamelCase__ = [imagea, imagea]
return images
@require_vision
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = ImageGPTImageProcessor.from_pretrained('openai/imagegpt-small' )
UpperCamelCase__ = prepare_images()
# test non-batched
UpperCamelCase__ = image_processing(images[0] , return_tensors='pt' )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 1024) )
UpperCamelCase__ = [306, 191, 191]
self.assertEqual(encoding.input_ids[0, :3].tolist() , snake_case_ )
# test batched
UpperCamelCase__ = image_processing(snake_case_ , return_tensors='pt' )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 1024) )
UpperCamelCase__ = [303, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , snake_case_ )
"""simple docstring"""
from __future__ import annotations
from typing import Generic, TypeVar
A__ : Any= TypeVar("""T""")
class __lowerCamelCase ( Generic[T] ):
def __init__( self , snake_case_ ) -> None:
UpperCamelCase__ = data
UpperCamelCase__ = self
UpperCamelCase__ = 0
class __lowerCamelCase ( Generic[T] ):
def __init__( self ) -> None:
# map from node name to the node object
UpperCamelCase__ = {}
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> None:
# create a new set with x as its member
UpperCamelCase__ = DisjointSetTreeNode(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> DisjointSetTreeNode[T]:
# find the set x belongs to (with path-compression)
UpperCamelCase__ = self.map[data]
if elem_ref != elem_ref.parent:
UpperCamelCase__ = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> None:
# helper function for union operation
if nodea.rank > nodea.rank:
UpperCamelCase__ = nodea
else:
UpperCamelCase__ = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> None:
# merge 2 disjoint sets
self.link(self.find_set(snake_case_ ) , self.find_set(snake_case_ ) )
class __lowerCamelCase ( Generic[T] ):
def __init__( self ) -> None:
# connections: map from the node to the neighbouring nodes (with weights)
UpperCamelCase__ = {}
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> None:
# add a node ONLY if its not present in the graph
if node not in self.connections:
UpperCamelCase__ = {}
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> None:
# add an edge with the given weight
self.add_node(snake_case_ )
self.add_node(snake_case_ )
UpperCamelCase__ = weight
UpperCamelCase__ = weight
def SCREAMING_SNAKE_CASE__ ( self ) -> GraphUndirectedWeighted[T]:
UpperCamelCase__ = []
UpperCamelCase__ = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
edges.sort(key=lambda snake_case_ : x[2] )
# creating the disjoint set
UpperCamelCase__ = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(snake_case_ )
# MST generation
UpperCamelCase__ = 0
UpperCamelCase__ = 0
UpperCamelCase__ = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = edges[index]
index += 1
UpperCamelCase__ = disjoint_set.find_set(snake_case_ )
UpperCamelCase__ = disjoint_set.find_set(snake_case_ )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(snake_case_ , snake_case_ , snake_case_ )
disjoint_set.union(snake_case_ , snake_case_ )
return graph
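
A quick usage sketch: build a small weighted triangle and check that Kruskal's algorithm drops the heaviest edge:

graph = GraphUndirectedWeighted[int]()
graph.add_edge(1, 2, 1)
graph.add_edge(2, 3, 2)
graph.add_edge(1, 3, 10)
mst = graph.kruskal()
assert 3 not in mst.connections[1]  # the weight-10 edge is not part of the MST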
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : List[str]= logging.get_logger(__name__)
class __lowerCamelCase ( _a ):
a : Optional[int] ="""timm_backbone"""
def __init__( self , snake_case_=None , snake_case_=3 , snake_case_=True , snake_case_=True , snake_case_=None , **snake_case_ , ) -> Dict:
super().__init__(**snake_case_ )
UpperCamelCase__ = backbone
UpperCamelCase__ = num_channels
UpperCamelCase__ = features_only
UpperCamelCase__ = use_pretrained_backbone
UpperCamelCase__ = True
UpperCamelCase__ = out_indices if out_indices is not None else (-1,)
"""simple docstring"""
A__ : Tuple= """Alexander Joslin"""
import operator as op
from .stack import Stack
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
UpperCamelCase__ = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
UpperCamelCase__ = Stack()
UpperCamelCase__ = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(SCREAMING_SNAKE_CASE ) )
elif i in operators:
# RULE 2
operator_stack.push(SCREAMING_SNAKE_CASE )
elif i == ")":
# RULE 4
UpperCamelCase__ = operator_stack.peek()
operator_stack.pop()
UpperCamelCase__ = operand_stack.peek()
operand_stack.pop()
UpperCamelCase__ = operand_stack.peek()
operand_stack.pop()
UpperCamelCase__ = operators[opr](SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
operand_stack.push(SCREAMING_SNAKE_CASE )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
A__ : int= """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
"""simple docstring"""
from string import ascii_lowercase, ascii_uppercase
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
if not sentence:
return ""
UpperCamelCase__ = dict(zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
"""simple docstring"""
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
A__ : Any= """src/diffusers"""
# Matches is_xxx_available()
A__ : Tuple= re.compile(r"""is\_([a-z_]*)_available\(\)""")
# Matches from xxx import bla
A__ : Any= re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
A__ : Optional[Any]= """
{0} = None
"""
A__ : List[Any]= """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
"""
A__ : Dict= """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ = _re_backend.findall(SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) == 0:
return None
return "_and_".join(SCREAMING_SNAKE_CASE )
def lowerCAmelCase_( ) -> str:
"""simple docstring"""
with open(os.path.join(SCREAMING_SNAKE_CASE , '__init__.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
UpperCamelCase__ = f.readlines()
# Get to the point we do the actual imports for type checking
UpperCamelCase__ = 0
UpperCamelCase__ = {}
# Go through the end of the file
while line_index < len(SCREAMING_SNAKE_CASE ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
UpperCamelCase__ = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith('else:' ):
line_index += 1
line_index += 1
UpperCamelCase__ = []
# Until we unindent, add backend objects to the list
while line_index < len(SCREAMING_SNAKE_CASE ) and len(lines[line_index] ) > 1:
UpperCamelCase__ = lines[line_index]
UpperCamelCase__ = _re_single_line_import.search(SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(SCREAMING_SNAKE_CASE ) > 0:
UpperCamelCase__ = objects
else:
line_index += 1
return backend_specific_objects
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
if name.isupper():
return DUMMY_CONSTANT.format(SCREAMING_SNAKE_CASE )
elif name.islower():
return DUMMY_FUNCTION.format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
return DUMMY_CLASS.format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE=None ) -> int:
"""simple docstring"""
if backend_specific_objects is None:
UpperCamelCase__ = read_init()
# For special correspondence backend to module name as used in the function requires_modulename
UpperCamelCase__ = {}
for backend, objects in backend_specific_objects.items():
UpperCamelCase__ = '[' + ', '.join(F'"{b}"' for b in backend.split('_and_' ) ) + ']'
UpperCamelCase__ = '# This file is autogenerated by the command `make fix-copies`, do not edit.\n'
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for o in objects] )
UpperCamelCase__ = dummy_file
return dummy_files
def lowerCAmelCase_( SCREAMING_SNAKE_CASE=False ) -> int:
"""simple docstring"""
UpperCamelCase__ = create_dummy_files()
# For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
UpperCamelCase__ = {'torch': 'pt'}
# Locate actual dummy modules and read their content.
UpperCamelCase__ = os.path.join(SCREAMING_SNAKE_CASE , 'utils' )
UpperCamelCase__ = {
backend: os.path.join(SCREAMING_SNAKE_CASE , F'dummy_{short_names.get(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}_objects.py' )
for backend in dummy_files.keys()
}
UpperCamelCase__ = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(SCREAMING_SNAKE_CASE ):
with open(SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' , newline='\n' ) as f:
UpperCamelCase__ = f.read()
else:
UpperCamelCase__ = ''
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F'Updating diffusers.utils.dummy_{short_names.get(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}_objects.py as the main '
'__init__ has new objects.' )
with open(dummy_file_paths[backend] , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
'The main __init__ has objects that are not present in '
F'diffusers.utils.dummy_{short_names.get(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}_objects.py. Run `make fix-copies` '
'to fix this.' )
if __name__ == "__main__":
A__ : Any= argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
A__ : Optional[int]= parser.parse_args()
check_dummies(args.fix_and_overwrite)
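
For illustration, create_dummy_object picks a template from the casing of the name, so a CamelCase name such as UNet2DModel (a real diffusers class, used here only as an example) renders the class template:

print(create_dummy_object("UNet2DModel", '["torch"]'))
# class UNet2DModel(metaclass=DummyObject):
#     _backends = ["torch"]
#
#     def __init__(self, *args, **kwargs):
#         requires_backends(self, ["torch"])
#     ...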
"""simple docstring"""
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
A__ : List[Any]= logging.get_logger(__name__)
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
UpperCamelCase__ = WavaVecaForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE , config=SCREAMING_SNAKE_CASE )
UpperCamelCase__ = downstream_dict['projector.weight']
UpperCamelCase__ = downstream_dict['projector.bias']
UpperCamelCase__ = downstream_dict['model.post_net.linear.weight']
UpperCamelCase__ = downstream_dict['model.post_net.linear.bias']
return model
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
UpperCamelCase__ = WavaVecaForAudioFrameClassification.from_pretrained(SCREAMING_SNAKE_CASE , config=SCREAMING_SNAKE_CASE )
UpperCamelCase__ = downstream_dict['model.linear.weight']
UpperCamelCase__ = downstream_dict['model.linear.bias']
return model
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ = WavaVecaForXVector.from_pretrained(SCREAMING_SNAKE_CASE , config=SCREAMING_SNAKE_CASE )
UpperCamelCase__ = downstream_dict['connector.weight']
UpperCamelCase__ = downstream_dict['connector.bias']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
UpperCamelCase__ = downstream_dict[
F'model.framelevel_feature_extractor.module.{i}.kernel.weight'
]
UpperCamelCase__ = downstream_dict[F'model.framelevel_feature_extractor.module.{i}.kernel.bias']
UpperCamelCase__ = downstream_dict['model.utterancelevel_feature_extractor.linear1.weight']
UpperCamelCase__ = downstream_dict['model.utterancelevel_feature_extractor.linear1.bias']
UpperCamelCase__ = downstream_dict['model.utterancelevel_feature_extractor.linear2.weight']
UpperCamelCase__ = downstream_dict['model.utterancelevel_feature_extractor.linear2.bias']
UpperCamelCase__ = downstream_dict['objective.W']
return model
@torch.no_grad()
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
UpperCamelCase__ = torch.load(SCREAMING_SNAKE_CASE , map_location='cpu' )
UpperCamelCase__ = checkpoint['Downstream']
UpperCamelCase__ = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = WavaVecaFeatureExtractor.from_pretrained(
SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , do_normalize=SCREAMING_SNAKE_CASE )
UpperCamelCase__ = hf_config.architectures[0]
if arch.endswith('ForSequenceClassification' ):
UpperCamelCase__ = convert_classification(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif arch.endswith('ForAudioFrameClassification' ):
UpperCamelCase__ = convert_diarization(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif arch.endswith('ForXVector' ):
UpperCamelCase__ = convert_xvector(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
raise NotImplementedError(F'S3PRL weights conversion is not supported for {arch}' )
if hf_config.use_weighted_layer_sum:
UpperCamelCase__ = checkpoint['Featurizer']['weights']
hf_feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE )
hf_model.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
A__ : str= argparse.ArgumentParser()
parser.add_argument(
"""--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model."""
)
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""")
parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""")
A__ : List[Any]= parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
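
An example invocation, with a hypothetical script filename and placeholder paths; only the facebook/wav2vec2-base checkpoint name is real:

# python convert_s3prl_checkpoint.py \
#     --base_model_name facebook/wav2vec2-base \
#     --config_path ./config.json \
#     --checkpoint_path ./s3prl_checkpoint.ckpt \
#     --model_dump_path ./converted_model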
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
A__ : Optional[Any]= """Run commands across TPU VMs for initial setup before running `accelerate launch`."""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE=None ) -> Dict:
"""simple docstring"""
if subparsers is not None:
UpperCamelCase__ = subparsers.add_parser('tpu-config' , description=_description )
else:
UpperCamelCase__ = argparse.ArgumentParser('Accelerate tpu-config command' , description=_description )
# Core arguments
UpperCamelCase__ = parser.add_argument_group(
'Config Arguments' , 'Arguments that can be configured through `accelerate config`.' )
config_args.add_argument(
'--config_file' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='Path to the config file to use for accelerate.' , )
config_args.add_argument(
'--tpu_name' , default=SCREAMING_SNAKE_CASE , help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.' , )
config_args.add_argument(
'--tpu_zone' , default=SCREAMING_SNAKE_CASE , help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.' , )
UpperCamelCase__ = parser.add_argument_group('TPU Arguments' , 'Arguments for options ran inside the TPU.' )
pod_args.add_argument(
'--use_alpha' , action='store_true' , help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.' , )
pod_args.add_argument(
'--command_file' , default=SCREAMING_SNAKE_CASE , help='The path to the file containing the commands to run on the pod on startup.' , )
pod_args.add_argument(
'--command' , action='append' , nargs='+' , help='A command to run on the pod. Can be passed multiple times.' , )
pod_args.add_argument(
'--install_accelerate' , action='store_true' , help='Whether to install accelerate on the pod. Defaults to False.' , )
pod_args.add_argument(
'--accelerate_version' , default='latest' , help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.' , )
pod_args.add_argument(
'--debug' , action='store_true' , help='If set, will print the command that would be run instead of running it.' )
if subparsers is not None:
parser.set_defaults(func=SCREAMING_SNAKE_CASE )
return parser
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
UpperCamelCase__ = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
UpperCamelCase__ = defaults.command_file
if not args.command and defaults.commands is not None:
UpperCamelCase__ = defaults.commands
if not args.tpu_name:
UpperCamelCase__ = defaults.tpu_name
if not args.tpu_zone:
UpperCamelCase__ = defaults.tpu_zone
if args.accelerate_version == "dev":
UpperCamelCase__ = 'git+https://github.com/huggingface/accelerate.git'
elif args.accelerate_version == "latest":
UpperCamelCase__ = 'accelerate -U'
elif isinstance(parse(args.accelerate_version ) , SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = F'accelerate=={args.accelerate_version}'
if not args.command_file and not args.command:
raise ValueError('You must specify either a command file or a command to run on the pod.' )
if args.command_file:
with open(args.command_file , 'r' ) as f:
UpperCamelCase__ = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
UpperCamelCase__ = ['cd /usr/share']
if args.install_accelerate:
new_cmd += [F'pip install {args.accelerate_version}']
new_cmd += args.command
UpperCamelCase__ = '; '.join(SCREAMING_SNAKE_CASE )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
UpperCamelCase__ = ['gcloud']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(F'Running {" ".join(SCREAMING_SNAKE_CASE )}' )
return
subprocess.run(SCREAMING_SNAKE_CASE )
print('Successfully setup pod.' )
def lowerCAmelCase_( ) -> int:
"""simple docstring"""
UpperCamelCase__ = tpu_command_parser()
UpperCamelCase__ = parser.parse_args()
tpu_command_launcher(SCREAMING_SNAKE_CASE )
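
An example, with a hypothetical TPU name and zone; --debug prints the assembled gcloud command instead of running it:

# accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#     --command "pip install -U accelerate" --debug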
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
A__ : Optional[int]= object()
# For specifying empty leaf dict `{}`
A__ : Optional[Any]= object()
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ = tuple((re.compile(x + '$' ) for x in qs) )
for i in range(len(SCREAMING_SNAKE_CASE ) - len(SCREAMING_SNAKE_CASE ) + 1 ):
UpperCamelCase__ = [x.match(SCREAMING_SNAKE_CASE ) for x, y in zip(SCREAMING_SNAKE_CASE , ks[i:] )]
if matches and all(SCREAMING_SNAKE_CASE ):
return True
return False
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
def replace(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
for rule, replacement in rules:
if _match(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return replacement
return val
return replace
def lowerCAmelCase_( ) -> int:
"""simple docstring"""
return [
# embeddings
(("transformer", "wpe", "embedding"), P('mp' , SCREAMING_SNAKE_CASE )),
(("transformer", "wte", "embedding"), P('mp' , SCREAMING_SNAKE_CASE )),
# atention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(SCREAMING_SNAKE_CASE , 'mp' )),
(("attention", "out_proj", "kernel"), P('mp' , SCREAMING_SNAKE_CASE )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(SCREAMING_SNAKE_CASE , 'mp' )),
(("mlp", "c_fc", "bias"), P('mp' )),
(("mlp", "c_proj", "kernel"), P('mp' , SCREAMING_SNAKE_CASE )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
UpperCamelCase__ = _get_partition_rules()
UpperCamelCase__ = _replacement_rules(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = {k: _unmatched for k in flatten_dict(SCREAMING_SNAKE_CASE )}
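    # Every flattened parameter key starts as the _unmatched sentinel; the first matching
    # rule replaces it, and any leftover sentinel trips the assertion below.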
UpperCamelCase__ = {k: replace(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(SCREAMING_SNAKE_CASE ) )
| 20 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : List[str]= logging.get_logger(__name__)
class __lowerCamelCase ( _a ):
a : Optional[int] ="""timm_backbone"""
def __init__( self , snake_case_=None , snake_case_=3 , snake_case_=True , snake_case_=True , snake_case_=None , **snake_case_ , ) -> Dict:
super().__init__(**snake_case_ )
UpperCamelCase__ = backbone
UpperCamelCase__ = num_channels
UpperCamelCase__ = features_only
UpperCamelCase__ = use_pretrained_backbone
UpperCamelCase__ = True
UpperCamelCase__ = out_indices if out_indices is not None else (-1,)
| 20 | 1 |
"""simple docstring"""
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
A__ : List[Any]= logging.get_logger(__name__) # pylint: disable=invalid-name
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
warnings.warn(
'The preprocess method is deprecated and will be removed in a future version. Please'
' use VaeImageProcessor.preprocess instead' , SCREAMING_SNAKE_CASE , )
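    # Convert PIL images or tensors to a single NCHW float tensor scaled to [-1, 1].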
if isinstance(SCREAMING_SNAKE_CASE , torch.Tensor ):
return image
elif isinstance(SCREAMING_SNAKE_CASE , PIL.Image.Image ):
UpperCamelCase__ = [image]
if isinstance(image[0] , PIL.Image.Image ):
UpperCamelCase__ , UpperCamelCase__ = image[0].size
UpperCamelCase__ , UpperCamelCase__ = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
UpperCamelCase__ = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
UpperCamelCase__ = np.concatenate(SCREAMING_SNAKE_CASE , axis=0 )
UpperCamelCase__ = np.array(SCREAMING_SNAKE_CASE ).astype(np.floataa ) / 255.0
UpperCamelCase__ = image.transpose(0 , 3 , 1 , 2 )
UpperCamelCase__ = 2.0 * image - 1.0
UpperCamelCase__ = torch.from_numpy(SCREAMING_SNAKE_CASE )
elif isinstance(image[0] , torch.Tensor ):
UpperCamelCase__ = torch.cat(SCREAMING_SNAKE_CASE , dim=0 )
return image
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
if isinstance(SCREAMING_SNAKE_CASE , torch.Tensor ):
return mask
elif isinstance(SCREAMING_SNAKE_CASE , PIL.Image.Image ):
UpperCamelCase__ = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
UpperCamelCase__ , UpperCamelCase__ = mask[0].size
UpperCamelCase__ , UpperCamelCase__ = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
UpperCamelCase__ = [np.array(m.convert('L' ).resize((w, h) , resample=PIL_INTERPOLATION['nearest'] ) )[None, :] for m in mask]
UpperCamelCase__ = np.concatenate(SCREAMING_SNAKE_CASE , axis=0 )
UpperCamelCase__ = mask.astype(np.floataa ) / 255.0
UpperCamelCase__ = 0
UpperCamelCase__ = 1
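        # Intended effect (as in the original diffusers pipeline): binarize the mask at 0.5.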
UpperCamelCase__ = torch.from_numpy(SCREAMING_SNAKE_CASE )
elif isinstance(mask[0] , torch.Tensor ):
UpperCamelCase__ = torch.cat(SCREAMING_SNAKE_CASE , dim=0 )
return mask
class __lowerCamelCase ( _a ):
a : UNetaDModel
a : RePaintScheduler
def __init__( self , snake_case_ , snake_case_ ) -> Union[str, Any]:
super().__init__()
self.register_modules(unet=snake_case_ , scheduler=snake_case_ )
@torch.no_grad()
def __call__( self , snake_case_ , snake_case_ , snake_case_ = 250 , snake_case_ = 0.0 , snake_case_ = 10 , snake_case_ = 10 , snake_case_ = None , snake_case_ = "pil" , snake_case_ = True , ) -> Union[ImagePipelineOutput, Tuple]:
UpperCamelCase__ = image
UpperCamelCase__ = _preprocess_image(snake_case_ )
UpperCamelCase__ = original_image.to(device=self.device , dtype=self.unet.dtype )
UpperCamelCase__ = _preprocess_mask(snake_case_ )
UpperCamelCase__ = mask_image.to(device=self.device , dtype=self.unet.dtype )
UpperCamelCase__ = original_image.shape[0]
        # sample Gaussian noise to begin the loop
if isinstance(snake_case_ , snake_case_ ) and len(snake_case_ ) != batch_size:
raise ValueError(
F'You have passed a list of generators of length {len(snake_case_ )}, but requested an effective batch'
F' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
UpperCamelCase__ = original_image.shape
UpperCamelCase__ = randn_tensor(snake_case_ , generator=snake_case_ , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(snake_case_ , snake_case_ , snake_case_ , self.device )
UpperCamelCase__ = eta
UpperCamelCase__ = self.scheduler.timesteps[0] + 1
UpperCamelCase__ = generator[0] if isinstance(snake_case_ , snake_case_ ) else generator
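        # RePaint loop: denoise while t decreases, otherwise "undo" a step to re-inject noise.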
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
UpperCamelCase__ = self.unet(snake_case_ , snake_case_ ).sample
# compute previous image: x_t -> x_t-1
UpperCamelCase__ = self.scheduler.step(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
UpperCamelCase__ = self.scheduler.undo_step(snake_case_ , snake_case_ , snake_case_ )
UpperCamelCase__ = t
UpperCamelCase__ = (image / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase__ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCamelCase__ = self.numpy_to_pil(snake_case_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=snake_case_ )
| 20 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
A__ : Any= logging.get_logger(__name__)
A__ : str= {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class __lowerCamelCase ( _a ):
a : List[str] ="""layoutlmv3"""
def __init__( self , snake_case_=5_0265 , snake_case_=768 , snake_case_=12 , snake_case_=12 , snake_case_=3072 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=2 , snake_case_=0.02 , snake_case_=1E-5 , snake_case_=1 , snake_case_=0 , snake_case_=2 , snake_case_=1024 , snake_case_=128 , snake_case_=128 , snake_case_=True , snake_case_=32 , snake_case_=128 , snake_case_=64 , snake_case_=256 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=224 , snake_case_=3 , snake_case_=16 , snake_case_=None , **snake_case_ , ) -> Union[str, Any]:
super().__init__(
vocab_size=snake_case_ , hidden_size=snake_case_ , num_hidden_layers=snake_case_ , num_attention_heads=snake_case_ , intermediate_size=snake_case_ , hidden_act=snake_case_ , hidden_dropout_prob=snake_case_ , attention_probs_dropout_prob=snake_case_ , max_position_embeddings=snake_case_ , type_vocab_size=snake_case_ , initializer_range=snake_case_ , layer_norm_eps=snake_case_ , pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_ , )
UpperCamelCase__ = max_ad_position_embeddings
UpperCamelCase__ = coordinate_size
UpperCamelCase__ = shape_size
UpperCamelCase__ = has_relative_attention_bias
UpperCamelCase__ = rel_pos_bins
UpperCamelCase__ = max_rel_pos
UpperCamelCase__ = has_spatial_attention_bias
UpperCamelCase__ = rel_ad_pos_bins
UpperCamelCase__ = max_rel_ad_pos
UpperCamelCase__ = text_embed
UpperCamelCase__ = visual_embed
UpperCamelCase__ = input_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = patch_size
UpperCamelCase__ = classifier_dropout
class __lowerCamelCase ( _a ):
a : Tuple =version.parse("""1.12""" )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Mapping[str, Mapping[int, str]]:
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
else:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels'}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> float:
return 1E-5
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
return 12
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = -1 , snake_case_ = -1 , snake_case_ = False , snake_case_ = None , snake_case_ = 3 , snake_case_ = 40 , snake_case_ = 40 , ) -> Mapping[str, Any]:
setattr(processor.image_processor , 'apply_ocr' , snake_case_ )
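        # apply_ocr is overridden here (upstream passes False) so the dummy words and
        # boxes generated below are used as-is.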
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCamelCase__ = compute_effective_axis_dimension(
snake_case_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCamelCase__ = processor.tokenizer.num_special_tokens_to_add(snake_case_ )
UpperCamelCase__ = compute_effective_axis_dimension(
snake_case_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=snake_case_ )
# Generate dummy inputs according to compute batch and sequence
UpperCamelCase__ = [[' '.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
UpperCamelCase__ = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
UpperCamelCase__ = self._generate_dummy_images(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
UpperCamelCase__ = dict(
processor(
snake_case_ , text=snake_case_ , boxes=snake_case_ , return_tensors=snake_case_ , ) )
return inputs
| 20 | 1 |
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCamelCase ( _a ):
def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=5 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=False , snake_case_=True , snake_case_="None" , snake_case_=3 , snake_case_=4 , snake_case_=None , ) -> str:
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = seq_length
UpperCamelCase__ = is_training
UpperCamelCase__ = use_input_mask
UpperCamelCase__ = use_token_type_ids
UpperCamelCase__ = use_labels
UpperCamelCase__ = vocab_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = type_vocab_size
UpperCamelCase__ = type_sequence_label_size
UpperCamelCase__ = initializer_range
UpperCamelCase__ = num_labels
UpperCamelCase__ = num_choices
UpperCamelCase__ = relative_attention
UpperCamelCase__ = position_biased_input
UpperCamelCase__ = pos_att_type
UpperCamelCase__ = scope
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ = None
if self.use_input_mask:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
UpperCamelCase__ = None
if self.use_token_type_ids:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Any:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> str:
UpperCamelCase__ = DebertaVaModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ )[0]
UpperCamelCase__ = model(snake_case_ , token_type_ids=snake_case_ )[0]
UpperCamelCase__ = model(snake_case_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Optional[Any]:
UpperCamelCase__ = DebertaVaForMaskedLM(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Dict:
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = DebertaVaForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Optional[int]:
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = DebertaVaForTokenClassification(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Tuple:
UpperCamelCase__ = DebertaVaForQuestionAnswering(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int:
UpperCamelCase__ = DebertaVaForMultipleChoice(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = self.prepare_config_and_inputs()
        UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = config_and_inputs
UpperCamelCase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( _a , _a , unittest.TestCase ):
a : Any =(
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
a : Dict =(
{
"""feature-extraction""": DebertaVaModel,
"""fill-mask""": DebertaVaForMaskedLM,
"""question-answering""": DebertaVaForQuestionAnswering,
"""text-classification""": DebertaVaForSequenceClassification,
"""token-classification""": DebertaVaForTokenClassification,
"""zero-shot""": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
a : Tuple =True
a : Union[str, Any] =False
a : Tuple =False
a : Union[str, Any] =False
a : Dict =False
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
UpperCamelCase__ = DebertaVaModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=snake_case_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*snake_case_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ = DebertaVaModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( unittest.TestCase ):
@unittest.skip(reason='Model not available yet' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
pass
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
UpperCamelCase__ = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge' )
UpperCamelCase__ = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
UpperCamelCase__ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ )[0]
# compare the actual values for a slice.
UpperCamelCase__ = torch.tensor(
[[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , snake_case_ , atol=1E-4 ) , F'{output[:, 1:4, 1:4]}' )
| 20 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class __lowerCamelCase :
def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=2 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=3 , snake_case_=4 , snake_case_=None , ) -> Tuple:
UpperCamelCase__ = parent
UpperCamelCase__ = 13
UpperCamelCase__ = 7
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = 99
UpperCamelCase__ = 384
UpperCamelCase__ = 2
UpperCamelCase__ = 4
UpperCamelCase__ = 37
UpperCamelCase__ = 'gelu'
UpperCamelCase__ = 0.1
UpperCamelCase__ = 0.1
UpperCamelCase__ = 512
UpperCamelCase__ = 16
UpperCamelCase__ = 2
UpperCamelCase__ = 0.02
UpperCamelCase__ = 3
UpperCamelCase__ = 4
UpperCamelCase__ = 128
UpperCamelCase__ = 2
UpperCamelCase__ = 9
UpperCamelCase__ = 1
UpperCamelCase__ = None
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ = None
if self.use_input_mask:
UpperCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ = None
if self.use_token_type_ids:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase__ = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=snake_case_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int:
UpperCamelCase__ = TFConvBertModel(config=snake_case_ )
UpperCamelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCamelCase__ = [input_ids, input_mask]
UpperCamelCase__ = model(snake_case_ )
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int:
UpperCamelCase__ = TFConvBertForMaskedLM(config=snake_case_ )
UpperCamelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[str]:
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = TFConvBertForSequenceClassification(config=snake_case_ )
UpperCamelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Tuple:
UpperCamelCase__ = self.num_choices
UpperCamelCase__ = TFConvBertForMultipleChoice(config=snake_case_ )
UpperCamelCase__ = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[str]:
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = TFConvBertForTokenClassification(config=snake_case_ )
UpperCamelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[Any]:
UpperCamelCase__ = TFConvBertForQuestionAnswering(config=snake_case_ )
UpperCamelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
UpperCamelCase__ = self.prepare_config_and_inputs()
        UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = config_and_inputs
UpperCamelCase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class __lowerCamelCase ( _a , _a , unittest.TestCase ):
a : Any =(
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
a : str =(
{
"""feature-extraction""": TFConvBertModel,
"""fill-mask""": TFConvBertForMaskedLM,
"""question-answering""": TFConvBertForQuestionAnswering,
"""text-classification""": TFConvBertForSequenceClassification,
"""token-classification""": TFConvBertForTokenClassification,
"""zero-shot""": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
a : Any =False
a : Dict =False
a : str =False
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = TFConvBertModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=snake_case_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = True
UpperCamelCase__ = True
if hasattr(snake_case_ , 'use_cache' ):
UpperCamelCase__ = True
UpperCamelCase__ = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
UpperCamelCase__ = getattr(self.model_tester , 'key_length' , snake_case_ )
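        # Export each model as a TF SavedModel and check that hidden states and attentions
        # keep their expected counts and shapes after reloading.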
for model_class in self.all_model_classes:
UpperCamelCase__ = self._prepare_for_class(snake_case_ , snake_case_ )
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = len(model(snake_case_ ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case_ , saved_model=snake_case_ )
UpperCamelCase__ = os.path.join(snake_case_ , 'saved_model' , '1' )
UpperCamelCase__ = tf.keras.models.load_model(snake_case_ )
UpperCamelCase__ = model(snake_case_ )
if self.is_encoder_decoder:
UpperCamelCase__ = outputs['encoder_hidden_states']
UpperCamelCase__ = outputs['encoder_attentions']
else:
UpperCamelCase__ = outputs['hidden_states']
UpperCamelCase__ = outputs['attentions']
self.assertEqual(len(snake_case_ ) , snake_case_ )
UpperCamelCase__ = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(snake_case_ ) , snake_case_ )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = True
UpperCamelCase__ = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
UpperCamelCase__ = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
UpperCamelCase__ = getattr(self.model_tester , 'key_length' , snake_case_ )
UpperCamelCase__ = getattr(self.model_tester , 'key_length' , snake_case_ )
def check_decoder_attentions_output(snake_case_ ):
UpperCamelCase__ = len(snake_case_ )
self.assertEqual(out_len % 2 , 0 )
UpperCamelCase__ = outputs.decoder_attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(snake_case_ ):
UpperCamelCase__ = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
UpperCamelCase__ = True
UpperCamelCase__ = False
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
UpperCamelCase__ = len(snake_case_ )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
if self.is_encoder_decoder:
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_decoder_attentions_output(snake_case_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
UpperCamelCase__ = True
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
# Check attention is always last and order is fine
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(snake_case_ ) )
self.assertEqual(model.config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
@require_tf
class __lowerCamelCase ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
UpperCamelCase__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCamelCase__ = model(snake_case_ )[0]
UpperCamelCase__ = [1, 6, 768]
self.assertEqual(output.shape , snake_case_ )
UpperCamelCase__ = tf.constant(
[
[
[-0.03_475_493, -0.4_686_034, -0.30_638_832],
[0.22_637_248, -0.26_988_646, -0.7_423_424],
[0.10_324_868, -0.45_013_508, -0.58_280_784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , snake_case_ , atol=1E-4 )
| 20 | 1 |
"""simple docstring"""
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class __lowerCamelCase ( _a ):
a : Optional[int] =["""image_processor"""]
a : List[Any] ="""SamImageProcessor"""
def __init__( self , snake_case_ ) -> Any:
super().__init__(snake_case_ )
UpperCamelCase__ = self.image_processor
UpperCamelCase__ = -10
UpperCamelCase__ = self.image_processor.size['longest_edge']
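        # point_pad_value (-10) pads ragged point prompts; target_size is the processor's
        # longest edge and drives coordinate rescaling.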
def __call__( self , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_ = None , **snake_case_ , ) -> BatchEncoding:
UpperCamelCase__ = self.image_processor(
snake_case_ , return_tensors=snake_case_ , **snake_case_ , )
        # pop arguments that are not used in the forward pass but are needed nevertheless
UpperCamelCase__ = encoding_image_processor['original_sizes']
if hasattr(snake_case_ , 'numpy' ): # Checks if Torch or TF tensor
UpperCamelCase__ = original_sizes.numpy()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._check_and_preprocess_points(
input_points=snake_case_ , input_labels=snake_case_ , input_boxes=snake_case_ , )
UpperCamelCase__ = self._normalize_and_convert(
snake_case_ , snake_case_ , input_points=snake_case_ , input_labels=snake_case_ , input_boxes=snake_case_ , return_tensors=snake_case_ , )
return encoding_image_processor
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_="pt" , ) -> Tuple:
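        # Normalize prompt coordinates to the resized image, pad ragged point lists, then
        # convert points, labels and boxes to the requested tensor framework.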
if input_points is not None:
if len(snake_case_ ) != len(snake_case_ ):
UpperCamelCase__ = [
self._normalize_coordinates(self.target_size , snake_case_ , original_sizes[0] ) for point in input_points
]
else:
UpperCamelCase__ = [
self._normalize_coordinates(self.target_size , snake_case_ , snake_case_ )
for point, original_size in zip(snake_case_ , snake_case_ )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
UpperCamelCase__ , UpperCamelCase__ = self._pad_points_and_labels(snake_case_ , snake_case_ )
UpperCamelCase__ = np.array(snake_case_ )
if input_labels is not None:
UpperCamelCase__ = np.array(snake_case_ )
if input_boxes is not None:
if len(snake_case_ ) != len(snake_case_ ):
UpperCamelCase__ = [
self._normalize_coordinates(self.target_size , snake_case_ , original_sizes[0] , is_bounding_box=snake_case_ )
for box in input_boxes
]
else:
UpperCamelCase__ = [
self._normalize_coordinates(self.target_size , snake_case_ , snake_case_ , is_bounding_box=snake_case_ )
for box, original_size in zip(snake_case_ , snake_case_ )
]
UpperCamelCase__ = np.array(snake_case_ )
if input_boxes is not None:
if return_tensors == "pt":
UpperCamelCase__ = torch.from_numpy(snake_case_ )
# boxes batch size of 1 by default
UpperCamelCase__ = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
UpperCamelCase__ = tf.convert_to_tensor(snake_case_ )
# boxes batch size of 1 by default
UpperCamelCase__ = tf.expand_dims(snake_case_ , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({'input_boxes': input_boxes} )
if input_points is not None:
if return_tensors == "pt":
UpperCamelCase__ = torch.from_numpy(snake_case_ )
# point batch size of 1 by default
UpperCamelCase__ = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
UpperCamelCase__ = tf.convert_to_tensor(snake_case_ )
# point batch size of 1 by default
UpperCamelCase__ = tf.expand_dims(snake_case_ , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({'input_points': input_points} )
if input_labels is not None:
if return_tensors == "pt":
UpperCamelCase__ = torch.from_numpy(snake_case_ )
# point batch size of 1 by default
UpperCamelCase__ = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
UpperCamelCase__ = tf.convert_to_tensor(snake_case_ )
# point batch size of 1 by default
UpperCamelCase__ = tf.expand_dims(snake_case_ , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({'input_labels': input_labels} )
return encoding_image_processor
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> List[str]:
UpperCamelCase__ = max([point.shape[0] for point in input_points] )
UpperCamelCase__ = []
for i, point in enumerate(snake_case_ ):
if point.shape[0] != expected_nb_points:
UpperCamelCase__ = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
UpperCamelCase__ = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(snake_case_ )
UpperCamelCase__ = processed_input_points
return input_points, input_labels
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=False ) -> np.ndarray:
UpperCamelCase__ , UpperCamelCase__ = original_size
UpperCamelCase__ , UpperCamelCase__ = self.image_processor._get_preprocess_shape(snake_case_ , longest_edge=snake_case_ )
UpperCamelCase__ = deepcopy(snake_case_ ).astype(snake_case_ )
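        # Rescale (x, y) coordinates from the original image size to the resized model input.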
if is_bounding_box:
UpperCamelCase__ = coords.reshape(-1 , 2 , 2 )
UpperCamelCase__ = coords[..., 0] * (new_w / old_w)
UpperCamelCase__ = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
UpperCamelCase__ = coords.reshape(-1 , 4 )
return coords
def SCREAMING_SNAKE_CASE__ ( self , snake_case_=None , snake_case_=None , snake_case_=None , ) -> Union[str, Any]:
if input_points is not None:
if hasattr(snake_case_ , 'numpy' ): # Checks for TF or Torch tensor
UpperCamelCase__ = input_points.numpy().tolist()
if not isinstance(snake_case_ , snake_case_ ) or not isinstance(input_points[0] , snake_case_ ):
                raise ValueError('Input points must be a list of lists of floating points.' )
UpperCamelCase__ = [np.array(snake_case_ ) for input_point in input_points]
else:
UpperCamelCase__ = None
if input_labels is not None:
if hasattr(snake_case_ , 'numpy' ):
UpperCamelCase__ = input_labels.numpy().tolist()
if not isinstance(snake_case_ , snake_case_ ) or not isinstance(input_labels[0] , snake_case_ ):
                raise ValueError('Input labels must be a list of lists of integers.' )
UpperCamelCase__ = [np.array(snake_case_ ) for label in input_labels]
else:
UpperCamelCase__ = None
if input_boxes is not None:
if hasattr(snake_case_ , 'numpy' ):
UpperCamelCase__ = input_boxes.numpy().tolist()
if (
not isinstance(snake_case_ , snake_case_ )
or not isinstance(input_boxes[0] , snake_case_ )
or not isinstance(input_boxes[0][0] , snake_case_ )
):
                raise ValueError('Input boxes must be a list of lists of lists of floating points.' )
UpperCamelCase__ = [np.array(snake_case_ ).astype(np.floataa ) for box in input_boxes]
else:
UpperCamelCase__ = None
return input_points, input_labels, input_boxes
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.image_processor.model_input_names
return list(dict.fromkeys(snake_case_ ) )
def SCREAMING_SNAKE_CASE__ ( self , *snake_case_ , **snake_case_ ) -> Dict:
return self.image_processor.post_process_masks(*snake_case_ , **snake_case_ )
| 20 |
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def lowerCAmelCase_( SCREAMING_SNAKE_CASE = 1_00_00_00 , SCREAMING_SNAKE_CASE = 10 ) -> int:
"""simple docstring"""
UpperCamelCase__ = defaultdict(SCREAMING_SNAKE_CASE )
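    # count[n] = number of distinct (outer width, hole width) laminae using exactly n tiles.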
for outer_width in range(3 , (t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
UpperCamelCase__ = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
else:
UpperCamelCase__ = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(SCREAMING_SNAKE_CASE , outer_width - 1 , 2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 20 | 1 |
"""simple docstring"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> list[int]:
"""simple docstring"""
UpperCamelCase__ = len(SCREAMING_SNAKE_CASE )
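    # O(n^2) exchange sort: compare each pair (i, j) with j > i and swap when out of order.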
for i in range(SCREAMING_SNAKE_CASE ):
for j in range(i + 1 , SCREAMING_SNAKE_CASE ):
if numbers[j] < numbers[i]:
UpperCamelCase__ , UpperCamelCase__ = numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
A__ : Union[str, Any]= input("""Enter numbers separated by a comma:\n""").strip()
A__ : List[Any]= [int(item) for item in user_input.split(""",""")]
print(exchange_sort(unsorted))
| 20 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class __lowerCamelCase ( unittest.TestCase ):
def __init__( self , snake_case_ , snake_case_=100 , snake_case_=13 , snake_case_=30 , snake_case_=2 , snake_case_=3 , snake_case_=True , snake_case_=True , snake_case_=32 , snake_case_=5 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=10 , snake_case_=0.02 , snake_case_=3 , ) -> Optional[int]:
UpperCamelCase__ = parent
UpperCamelCase__ = vocab_size
UpperCamelCase__ = batch_size
UpperCamelCase__ = image_size
UpperCamelCase__ = patch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = is_training
UpperCamelCase__ = use_labels
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = type_sequence_label_size
UpperCamelCase__ = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase__ = (image_size // patch_size) ** 2
UpperCamelCase__ = num_patches + 1
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ = BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case_ , initializer_range=self.initializer_range , )
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> List[Any]:
UpperCamelCase__ = FlaxBeitModel(config=snake_case_ )
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> List[Any]:
UpperCamelCase__ = FlaxBeitForMaskedImageModeling(config=snake_case_ )
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> List[str]:
UpperCamelCase__ = self.type_sequence_label_size
UpperCamelCase__ = FlaxBeitForImageClassification(config=snake_case_ )
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase__ = 1
UpperCamelCase__ = FlaxBeitForImageClassification(snake_case_ )
UpperCamelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase__ = model(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.prepare_config_and_inputs()
        UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = config_and_inputs
UpperCamelCase__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class __lowerCamelCase ( _a , unittest.TestCase ):
a : int =(
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
def SCREAMING_SNAKE_CASE__ ( self ) -> None:
UpperCamelCase__ = FlaxBeitModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ = [*signature.parameters.keys()]
UpperCamelCase__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCamelCase__ = self._prepare_for_class(snake_case_ , snake_case_ )
UpperCamelCase__ = model_class(snake_case_ )
@jax.jit
def model_jitted(snake_case_ , **snake_case_ ):
return model(pixel_values=snake_case_ , **snake_case_ )
with self.subTest('JIT Enabled' ):
UpperCamelCase__ = model_jitted(**snake_case_ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
UpperCamelCase__ = model_jitted(**snake_case_ ).to_tuple()
self.assertEqual(len(snake_case_ ) , len(snake_case_ ) )
for jitted_output, output in zip(snake_case_ , snake_case_ ):
self.assertEqual(jitted_output.shape , output.shape )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
for model_class_name in self.all_model_classes:
UpperCamelCase__ = model_class_name.from_pretrained('microsoft/beit-base-patch16-224' )
UpperCamelCase__ = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(snake_case_ )
def lowerCAmelCase_( ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = FlaxBeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=snake_case_ , return_tensors='np' ).pixel_values
# prepare bool_masked_pos
UpperCamelCase__ = np.ones((1, 196) , dtype=snake_case_ )
# forward pass
UpperCamelCase__ = model(pixel_values=snake_case_ , bool_masked_pos=snake_case_ )
UpperCamelCase__ = outputs.logits
# verify the logits
UpperCamelCase__ = (1, 196, 8192)
self.assertEqual(logits.shape , snake_case_ )
UpperCamelCase__ = np.array(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , snake_case_ , atol=1E-2 ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=snake_case_ , return_tensors='np' )
# forward pass
UpperCamelCase__ = model(**snake_case_ )
UpperCamelCase__ = outputs.logits
# verify the logits
UpperCamelCase__ = (1, 1000)
self.assertEqual(logits.shape , snake_case_ )
UpperCamelCase__ = np.array([-1.2_385, -1.0_987, -1.0_108] )
self.assertTrue(np.allclose(logits[0, :3] , snake_case_ , atol=1E-4 ) )
UpperCamelCase__ = 281
self.assertEqual(logits.argmax(-1 ).item() , snake_case_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=snake_case_ , return_tensors='np' )
# forward pass
UpperCamelCase__ = model(**snake_case_ )
UpperCamelCase__ = outputs.logits
# verify the logits
UpperCamelCase__ = (1, 2_1841)
self.assertEqual(logits.shape , snake_case_ )
UpperCamelCase__ = np.array([1.6_881, -0.2_787, 0.5_901] )
self.assertTrue(np.allclose(logits[0, :3] , snake_case_ , atol=1E-4 ) )
UpperCamelCase__ = 2396
self.assertEqual(logits.argmax(-1 ).item() , snake_case_ )
| 20 | 1 |
"""simple docstring"""
from copy import deepcopy
class __lowerCamelCase :
def __init__( self , snake_case_ = None , snake_case_ = None ) -> None:
if arr is None and size is not None:
UpperCamelCase__ = size
UpperCamelCase__ = [0] * size
elif arr is not None:
self.init(snake_case_ )
else:
raise ValueError('Either arr or size must be specified' )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> None:
UpperCamelCase__ = len(snake_case_ )
UpperCamelCase__ = deepcopy(snake_case_ )
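        # Build the Fenwick tree in O(n): push each node's partial sum up to its next_() parent.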
for i in range(1 , self.size ):
UpperCamelCase__ = self.next_(snake_case_ )
if j < self.size:
self.tree[j] += self.tree[i]
def SCREAMING_SNAKE_CASE__ ( self ) -> list[int]:
UpperCamelCase__ = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
UpperCamelCase__ = self.next_(snake_case_ )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> int:
return index + (index & (-index))
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> int:
return index - (index & (-index))
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> None:
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
UpperCamelCase__ = self.next_(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> None:
self.add(snake_case_ , value - self.get(snake_case_ ) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> int:
if right == 0:
return 0
UpperCamelCase__ = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
UpperCamelCase__ = self.prev(snake_case_ )
return result
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> int:
return self.prefix(snake_case_ ) - self.prefix(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> int:
return self.query(snake_case_ , index + 1 )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> int:
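        # Binary lifting: find the largest index whose prefix sum does not exceed `value`.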
value -= self.tree[0]
if value < 0:
return -1
UpperCamelCase__ = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
UpperCamelCase__ = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 |
"""simple docstring"""
import sys
from collections import defaultdict
class __lowerCamelCase :
def __init__( self ) -> Tuple:
UpperCamelCase__ = []
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> List[str]:
return self.node_position[vertex]
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> Optional[int]:
UpperCamelCase__ = pos
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[Any]:
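        # Sift the element at `start` down until the min-heap property is restored.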
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
UpperCamelCase__ = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
UpperCamelCase__ = 2 * start + 1
else:
UpperCamelCase__ = 2 * start + 2
if heap[smallest_child] < heap[start]:
UpperCamelCase__ , UpperCamelCase__ = heap[smallest_child], positions[smallest_child]
UpperCamelCase__ , UpperCamelCase__ = (
heap[start],
positions[start],
)
UpperCamelCase__ , UpperCamelCase__ = temp, tempa
UpperCamelCase__ = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , snake_case_ )
self.top_to_bottom(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Any:
UpperCamelCase__ = position[index]
while index != 0:
UpperCamelCase__ = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
UpperCamelCase__ = heap[parent]
UpperCamelCase__ = position[parent]
self.set_position(position[parent] , snake_case_ )
else:
UpperCamelCase__ = val
UpperCamelCase__ = temp
self.set_position(snake_case_ , snake_case_ )
break
UpperCamelCase__ = parent
else:
UpperCamelCase__ = val
UpperCamelCase__ = temp
self.set_position(snake_case_ , 0 )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> Any:
UpperCamelCase__ = len(snake_case_ ) // 2 - 1
for i in range(snake_case_ , -1 , -1 ):
self.top_to_bottom(snake_case_ , snake_case_ , len(snake_case_ ) , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> List[Any]:
UpperCamelCase__ = positions[0]
UpperCamelCase__ = sys.maxsize
self.top_to_bottom(snake_case_ , 0 , len(snake_case_ ) , snake_case_ )
return temp
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ = Heap()
UpperCamelCase__ = [0] * len(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = [-1] * len(SCREAMING_SNAKE_CASE ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
UpperCamelCase__ = [] # Heap of Distance of vertices from their neighboring vertex
UpperCamelCase__ = []
for vertex in range(len(SCREAMING_SNAKE_CASE ) ):
distance_tv.append(sys.maxsize )
positions.append(SCREAMING_SNAKE_CASE )
heap.node_position.append(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = []
UpperCamelCase__ = 1
UpperCamelCase__ = sys.maxsize
for neighbor, distance in adjacency_list[0]:
UpperCamelCase__ = 0
UpperCamelCase__ = distance
heap.heapify(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for _ in range(1 , len(SCREAMING_SNAKE_CASE ) ):
UpperCamelCase__ = heap.delete_minimum(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
UpperCamelCase__ = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(SCREAMING_SNAKE_CASE )]
):
UpperCamelCase__ = distance
heap.bottom_to_top(
SCREAMING_SNAKE_CASE , heap.get_position(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase__ = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
A__ : Dict= int(input("""Enter number of edges: """).strip())
A__ : Dict= defaultdict(list)
for _ in range(edges_number):
A__ : Dict= [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
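    # Hedged example (added; not part of the original script): for the input
    #   3
    #   0 1 5
    #   1 2 2
    #   0 2 1
    # the tree grows from vertex 0 and the printed edges are [(0, 2), (2, 1)].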
| 20 | 1 |
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")


class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, WavaVecaFeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, WavaVecaFeatureExtractor)

    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = WavaVecaConfig()
            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()
            config_dict.pop("feature_extractor_type")
            config = WavaVecaFeatureExtractor(**config_dict)
            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)
            config = AutoFeatureExtractor.from_pretrained(tmpdirname)
            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)
        self.assertIsInstance(config, WavaVecaFeatureExtractor)

    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, WavaVecaFeatureExtractor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")

    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(WavaVecaConfig, WavaVecaFeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(WavaVecaFeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)
            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)
            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 20 |
"""simple docstring"""
from copy import deepcopy
class __lowerCamelCase :
def __init__( self , snake_case_ = None , snake_case_ = None ) -> None:
if arr is None and size is not None:
UpperCamelCase__ = size
UpperCamelCase__ = [0] * size
elif arr is not None:
self.init(snake_case_ )
else:
raise ValueError('Either arr or size must be specified' )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> None:
UpperCamelCase__ = len(snake_case_ )
UpperCamelCase__ = deepcopy(snake_case_ )
for i in range(1 , self.size ):
UpperCamelCase__ = self.next_(snake_case_ )
if j < self.size:
self.tree[j] += self.tree[i]
def SCREAMING_SNAKE_CASE__ ( self ) -> list[int]:
UpperCamelCase__ = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
UpperCamelCase__ = self.next_(snake_case_ )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> int:
return index + (index & (-index))
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> int:
return index - (index & (-index))
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> None:
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
UpperCamelCase__ = self.next_(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> None:
self.add(snake_case_ , value - self.get(snake_case_ ) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> int:
if right == 0:
return 0
UpperCamelCase__ = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
UpperCamelCase__ = self.prev(snake_case_ )
return result
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> int:
return self.prefix(snake_case_ ) - self.prefix(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> int:
return self.query(snake_case_ , index + 1 )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> int:
value -= self.tree[0]
if value < 0:
return -1
UpperCamelCase__ = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
UpperCamelCase__ = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Union[str, Any]= ["""SqueezeBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 20 |
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""" )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print("Testing", file)
            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(Path("..") / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformers_directory = Path("src/transformers")
        files = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path("src/transformers")
        files = "tokenization"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self):
        transformers_directory = Path("src/transformers")
        files = "configuration"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_remaining_examples(self):
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
| 20 | 1 |
"""simple docstring"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
if not head:
return True
# split the list to two parts
UpperCamelCase__ , UpperCamelCase__ = head.next, head
while fast and fast.next:
UpperCamelCase__ = fast.next.next
UpperCamelCase__ = slow.next
UpperCamelCase__ = slow.next
UpperCamelCase__ = None # Don't forget here! But forget still works!
# reverse the second part
UpperCamelCase__ = None
while second:
UpperCamelCase__ = second.next
UpperCamelCase__ = node
UpperCamelCase__ = second
UpperCamelCase__ = nxt
# compare two parts
# second part has the same or one less node
while node:
if node.val != head.val:
return False
UpperCamelCase__ = node.next
UpperCamelCase__ = head.next
return True
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
UpperCamelCase__ = UpperCamelCase__ = UpperCamelCase__ = head
while fast and fast.next:
UpperCamelCase__ , UpperCamelCase__ = fast.next.next, slow.next
# 2. Push the second half into the stack
UpperCamelCase__ = [slow.val]
while slow.next:
UpperCamelCase__ = slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
UpperCamelCase__ = cur.next
return True
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
if not head or not head.next:
return True
UpperCamelCase__ = {}
UpperCamelCase__ = 0
while head:
if head.val in d:
d[head.val].append(SCREAMING_SNAKE_CASE )
else:
UpperCamelCase__ = [pos]
UpperCamelCase__ = head.next
pos += 1
UpperCamelCase__ = pos - 1
UpperCamelCase__ = 0
for v in d.values():
if len(SCREAMING_SNAKE_CASE ) % 2 != 0:
middle += 1
else:
UpperCamelCase__ = 0
for i in range(0 , len(SCREAMING_SNAKE_CASE ) ):
if v[i] + v[len(SCREAMING_SNAKE_CASE ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
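

if __name__ == "__main__":
    # Hedged demo (added; the original file assumes an external linked-list node type).
    # A minimal node class is defined here only so the checks are runnable.
    class ListNode:
        def __init__(self, val):
            self.val = val
            self.next = None

    def build(values):
        head = tail = ListNode(values[0])
        for v in values[1:]:
            tail.next = ListNode(v)
            tail = tail.next
        return head

    for checker in (is_palindrome, is_palindrome_stack, is_palindrome_dict):
        assert checker(build([1, 2, 2, 1])) is True
        assert checker(build([1, 2, 3])) is False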
| 20 |
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""nvidia/segformer-b0-finetuned-ade-512-512""": (
"""https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"""
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
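

# Hedged usage sketch (added; not part of the original module): the defaults above
# correspond to a SegFormer-B0-sized encoder, e.g.
#   config = SegformerConfig()
#   assert config.hidden_sizes == [32, 64, 160, 256]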
| 20 | 1 |
"""simple docstring"""
from PIL import Image
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Image:
"""simple docstring"""
UpperCamelCase__ = (2_59 * (level + 2_55)) / (2_55 * (2_59 - level))
def contrast(SCREAMING_SNAKE_CASE ) -> int:
return int(1_28 + factor * (c - 1_28) )
return img.point(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
# Change contrast to 170
A__ : List[str]= change_contrast(img, 1_70)
cont_img.save("""image_data/lena_high_contrast.png""", format="""png""")
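        # Note (added): at level=170 the factor is (259 * 425) / (255 * 89) ≈ 4.85,
        # so channel values are pushed hard away from the 128 midpoint.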
| 20 |
"""simple docstring"""
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
| 20 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 20 |
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaVaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaVaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaVaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_deberta_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaVaModel,
            DebertaVaForMaskedLM,
            DebertaVaForSequenceClassification,
            DebertaVaForTokenClassification,
            DebertaVaForQuestionAnswering,
            DebertaVaForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaVaModel,
            "fill-mask": DebertaVaForMaskedLM,
            "question-answering": DebertaVaForQuestionAnswering,
            "text-classification": DebertaVaForSequenceClassification,
            "token-classification": DebertaVaForTokenClassification,
            "zero-shot": DebertaVaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaVaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge")
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 20 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> list[int]:
"""simple docstring"""
if num <= 0:
UpperCamelCase__ = F'{num}: Invalid input, please enter a positive integer.'
raise ValueError(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = [True] * (num + 1)
UpperCamelCase__ = []
UpperCamelCase__ = 2
UpperCamelCase__ = int(math.sqrt(SCREAMING_SNAKE_CASE ) )
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(SCREAMING_SNAKE_CASE )
# Set multiples of start be False
for i in range(start * start , num + 1 , SCREAMING_SNAKE_CASE ):
if sieve[i] is True:
UpperCamelCase__ = False
start += 1
for j in range(end + 1 , num + 1 ):
if sieve[j] is True:
prime.append(SCREAMING_SNAKE_CASE )
return prime
if __name__ == "__main__":
print(prime_sieve(int(input("""Enter a positive integer: """).strip())))
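    # Hedged sanity check (added; not part of the original script):
    assert prime_sieve(10) == [2, 3, 5, 7]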
| 20 |
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    """Derive a SwinConfig from a timm checkpoint name such as ``swin_tiny_patch4_window7_224``."""
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config


def rename_key(name):
    """Map a timm parameter name onto the Transformers Swin naming scheme."""
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name

    return name


def convert_state_dict(orig_state_dict, model):
    """Split fused timm qkv tensors into separate query/key/value weights."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
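    # Hedged usage sketch (added; the script file name below is an assumption):
    #   python convert_swin_timm_to_pytorch.py --swin_name swin_tiny_patch4_window7_224 \
    #       --pytorch_dump_folder_path ./swin-tiny-patch4-window7-224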
| 20 | 1 |
"""simple docstring"""
import json
import os
import re
import sys
import urllib.request
import requests
from bsa import BeautifulSoup
A__ : str= {
"""User-Agent""": """Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"""
""" (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"""
}
def lowerCAmelCase_( SCREAMING_SNAKE_CASE = "dhaka" , SCREAMING_SNAKE_CASE = 5 ) -> int:
"""simple docstring"""
UpperCamelCase__ = min(SCREAMING_SNAKE_CASE , 50 ) # Prevent abuse!
UpperCamelCase__ = {
'q': query,
'tbm': 'isch',
'hl': 'en',
'ijn': '0',
}
UpperCamelCase__ = requests.get('https://www.google.com/search' , params=SCREAMING_SNAKE_CASE , headers=SCREAMING_SNAKE_CASE )
UpperCamelCase__ = BeautifulSoup(html.text , 'html.parser' )
UpperCamelCase__ = ''.join(
re.findall(r'AF_initDataCallback\(([^<]+)\);' , str(soup.select('script' ) ) ) )
UpperCamelCase__ = json.dumps(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = json.loads(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = re.findall(
r'\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",' , SCREAMING_SNAKE_CASE , )
if not matched_google_image_data:
return 0
UpperCamelCase__ = re.sub(
r'\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]' , '' , str(SCREAMING_SNAKE_CASE ) , )
UpperCamelCase__ = re.findall(
r'(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]' , SCREAMING_SNAKE_CASE , )
for index, fixed_full_res_image in enumerate(SCREAMING_SNAKE_CASE ):
if index >= max_images:
return index
UpperCamelCase__ = bytes(SCREAMING_SNAKE_CASE , 'ascii' ).decode(
'unicode-escape' )
UpperCamelCase__ = bytes(SCREAMING_SNAKE_CASE , 'ascii' ).decode(
'unicode-escape' )
UpperCamelCase__ = urllib.request.build_opener()
UpperCamelCase__ = [
(
'User-Agent',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582',
)
]
urllib.request.install_opener(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = F'query_{query.replace(" " , "_" )}'
if not os.path.exists(SCREAMING_SNAKE_CASE ):
os.makedirs(SCREAMING_SNAKE_CASE )
urllib.request.urlretrieve( # noqa: S310
SCREAMING_SNAKE_CASE , F'{path_name}/original_size_img_{index}.jpg' )
return index
if __name__ == "__main__":
try:
A__ : Tuple= download_images_from_google_query(sys.argv[1])
print(F"""{image_count} images were downloaded to disk.""")
except IndexError:
print("""Please provide a search term.""")
raise
| 20 |
"""simple docstring"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> bool:
"""simple docstring"""
UpperCamelCase__ = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def lowerCAmelCase_( SCREAMING_SNAKE_CASE = 50_00 ) -> int:
"""simple docstring"""
UpperCamelCase__ = [(i * (3 * i - 1)) // 2 for i in range(1 , SCREAMING_SNAKE_CASE )]
for i, pentagonal_i in enumerate(SCREAMING_SNAKE_CASE ):
for j in range(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) ):
UpperCamelCase__ = pentagonal_nums[j]
UpperCamelCase__ = pentagonal_i + pentagonal_j
UpperCamelCase__ = pentagonal_j - pentagonal_i
if is_pentagonal(SCREAMING_SNAKE_CASE ) and is_pentagonal(SCREAMING_SNAKE_CASE ):
return b
return -1
if __name__ == "__main__":
print(F"""{solution() = }""")
| 20 | 1 |
"""simple docstring"""
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
A__ : Any= """▁"""
A__ : List[str]= get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class __lowerCamelCase ( _a , unittest.TestCase ):
a : List[Any] =BertGenerationTokenizer
a : Union[str, Any] =False
a : List[str] =True
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
super().setUp()
UpperCamelCase__ = BertGenerationTokenizer(snake_case_ , keep_accents=snake_case_ )
tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id(self) -> None:
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self) -> None:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self) -> None:
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self) -> None:
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382]
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4]
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")

    @slow
    def test_tokenization_base_easy_symbols(self) -> None:
        symbols = "Hello World!"
        original_tokenizer_encodings = [18536, 2260, 101]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self) -> None:
        symbols = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
        original_tokenizer_encodings = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
3_4324,
497,
391,
408,
1_1342,
1244,
385,
100,
938,
985,
456,
574,
362,
1_2597,
3200,
3129,
1172,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self) -> None:
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )
        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
# fmt: off
UpperCamelCase__ = {'input_ids': [[3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114], [448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name='google/bert_for_seq_generation_L-24_bbc_encoder' , revision='c817d1fd1be2ffa69431227a1fe320544943d4db' , )
| 20 |
"""simple docstring"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE = 50_00_00_00 ) -> int:
"""simple docstring"""
UpperCamelCase__ = set()
UpperCamelCase__ = int((limit - 24) ** (1 / 2) )
UpperCamelCase__ = set(range(3 , prime_square_limit + 1 , 2 ) )
primes.add(2 )
for p in range(3 , prime_square_limit + 1 , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , prime_square_limit + 1 , SCREAMING_SNAKE_CASE ) ) )
for primea in primes:
UpperCamelCase__ = primea * primea
for primea in primes:
UpperCamelCase__ = primea * primea * primea
if square + cube >= limit - 16:
break
for primea in primes:
UpperCamelCase__ = primea * primea * primea * primea
UpperCamelCase__ = square + cube + tetr
if total >= limit:
break
ret.add(SCREAMING_SNAKE_CASE )
return len(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 20 | 1 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Build MRPC train/eval dataloaders with tokenization and dynamic padding."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    """simple docstring"""
    if os.environ.get('TESTING_MOCKED_DATALOADERS' , None ) == "1":
        config['num_epochs'] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'] )
    seed = int(config['seed'] )
    batch_size = int(config['batch_size'] )
    metric = evaluate.load('glue' , 'mrpc' )
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions, references = accelerator.gather((predictions, batch['labels']) )
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader ) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                    references = references[: len(eval_dataloader.dataset ) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F'epoch {epoch}:' , eval_metric )
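# Illustrative sketch (added, not part of the original script): with `gather_for_metrics`,
# the manual last-batch truncation above collapses to two lines inside the eval loop:
#     predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
#     metric.add_batch(predictions=predictions, references=references)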
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description='Simple example of training script.' )
    parser.add_argument(
        '--mixed_precision' , type=str , default=None , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.' , )
    parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
    args = parser.parse_args()
    config = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 20 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"
if is_tf_available():
    class ModelToSave(tf.keras.Model ):
        def __init__(self , tokenizer ) -> None:
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT )
            self.bert = TFAutoModel.from_config(config )
        def call(self , inputs ) -> tf.Tensor:
            tokenized = self.tokenizer(inputs )
            out = self.bert(**tokenized )
            return out["pooler_output"]
@require_tf
@require_tensorflow_text
class __lowerCamelCase ( unittest.TestCase ):
    def setUp(self ) -> None:
        super().setUp()
        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint , use_fast_bert_tokenizer=False )
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers ) == len(self.tf_tokenizers )
        self.test_sentences = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
        ]
        self.paired_sentences = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
    def test_output_equivalence(self ) -> None:
        for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs , return_tensors='tf' , padding='longest' )
                tf_outputs = tf_tokenizer(test_inputs )
                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.int64 ) == tf_outputs[key] ) )
@slow
    def test_different_pairing_styles(self ) -> None:
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences )
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.int64 ) == separated_outputs[key] ) )
@slow
    def test_graph_mode(self ) -> None:
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer )
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs )
                compiled_outputs = compiled_tokenizer(test_inputs )
                eager_outputs = tf_tokenizer(test_inputs )
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
    def test_export_for_serving(self ) -> None:
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer )
            test_inputs = tf.convert_to_tensor(self.test_sentences )
            out = model(test_inputs )  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir ) / 'saved.model'
                model.save(save_path )
                loaded_model = tf.keras.models.load_model(save_path )
                loaded_output = loaded_model(test_inputs )
            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1E-5 )
| 20 | 1 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""adapter_layer""": """encoder.layers.*.adapter_layer""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
"""pooling_layer.linear""": """projector""",
"""pooling_layer.projection""": """classifier""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""projector""",
"""classifier""",
]
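# Illustrative note (added, not part of the original script): values in MAPPING may contain a
# "*" wildcard standing for the layer index. For example, the fairseq weight name
#     encoder.layers.3.self_attn.k_proj.weight
# matches the "self_attn.k_proj" entry and is rewritten to
#     encoder.layers.3.attention.k_proj.weight
# by the "*" substitution performed further below, before the tensor is copied over.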
def read_txt_into_dict(filename):
    """simple docstring"""
    result = {}
    with open(filename , 'r' ) as file:
        for line_number, line in enumerate(file ):
            line = line.strip()
            if line:
                words = line.split()
                key = words[0]
                value = line_number
                result[key] = value
    return result
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
for attribute in key.split('.' ):
UpperCamelCase__ = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase__ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = PARAM_MAPPING[full_name.split('.' )[-1]]
UpperCamelCase__ = 'param'
if weight_type is not None and weight_type != "param":
UpperCamelCase__ = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).shape
elif weight_type is not None and weight_type == "param":
UpperCamelCase__ = hf_pointer
for attribute in hf_param_name.split('.' ):
UpperCamelCase__ = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase__ = shape_pointer.shape
# let's reduce dimension
UpperCamelCase__ = value[0]
else:
UpperCamelCase__ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}' )
if weight_type == "weight":
UpperCamelCase__ = value
elif weight_type == "weight_g":
UpperCamelCase__ = value
elif weight_type == "weight_v":
UpperCamelCase__ = value
elif weight_type == "bias":
UpperCamelCase__ = value
elif weight_type == "param":
for attribute in hf_param_name.split('.' ):
UpperCamelCase__ = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase__ = value
else:
UpperCamelCase__ = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = PARAM_MAPPING[full_name.split('.' )[-1]]
UpperCamelCase__ = 'param'
if weight_type is not None and weight_type != "param":
UpperCamelCase__ = '.'.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
UpperCamelCase__ = '.'.join([key, hf_param_name] )
else:
UpperCamelCase__ = key
UpperCamelCase__ = value if 'lm_head' in full_key else value[0]
PARAM_MAPPING = {
"""W_a""": """linear_1.weight""",
"""W_b""": """linear_2.weight""",
"""b_a""": """linear_1.bias""",
"""b_b""": """linear_2.bias""",
"""ln_W""": """norm.weight""",
"""ln_b""": """norm.bias""",
}
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ = False
for key, mapped_key in MAPPING.items():
UpperCamelCase__ = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
UpperCamelCase__ = True
if "*" in mapped_key:
UpperCamelCase__ = name.split(SCREAMING_SNAKE_CASE )[0].split('.' )[-2]
UpperCamelCase__ = mapped_key.replace('*' , SCREAMING_SNAKE_CASE )
if "weight_g" in name:
UpperCamelCase__ = 'weight_g'
elif "weight_v" in name:
UpperCamelCase__ = 'weight_v'
elif "bias" in name:
UpperCamelCase__ = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCamelCase__ = 'weight'
else:
UpperCamelCase__ = None
if hf_dict is not None:
rename_dict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
set_recursively(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
    return is_used
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
UpperCamelCase__ = []
UpperCamelCase__ = fairseq_model.state_dict()
UpperCamelCase__ = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
UpperCamelCase__ = False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == 'group' , )
UpperCamelCase__ = True
else:
UpperCamelCase__ = load_wavaveca_layer(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE )
logger.warning(F'Unused weights: {unused_weights}' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ = full_name.split('conv_layers.' )[-1]
UpperCamelCase__ = name.split('.' )
UpperCamelCase__ = int(items[0] )
UpperCamelCase__ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
UpperCamelCase__ = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
UpperCamelCase__ = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
UpperCamelCase__ = value
            logger.info(F'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
UpperCamelCase__ = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(SCREAMING_SNAKE_CASE )
@torch.no_grad()
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False ) -> Optional[Any]:
"""simple docstring"""
if config_path is not None:
UpperCamelCase__ = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE )
else:
UpperCamelCase__ = WavaVecaConfig()
if is_seq_class:
UpperCamelCase__ = read_txt_into_dict(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = idalabel
UpperCamelCase__ = WavaVecaForSequenceClassification(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , )
feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE )
elif is_finetuned:
if dict_path:
UpperCamelCase__ = Dictionary.load(SCREAMING_SNAKE_CASE )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCamelCase__ = target_dict.pad_index
UpperCamelCase__ = target_dict.bos_index
UpperCamelCase__ = target_dict.eos_index
UpperCamelCase__ = len(target_dict.symbols )
UpperCamelCase__ = os.path.join(SCREAMING_SNAKE_CASE , 'vocab.json' )
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(SCREAMING_SNAKE_CASE ) )
return
os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
UpperCamelCase__ = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCamelCase__ = 0
UpperCamelCase__ = 1
with open(SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase__ = WavaVecaCTCTokenizer(
SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=SCREAMING_SNAKE_CASE , )
UpperCamelCase__ = True if config.feat_extract_norm == 'layer' else False
UpperCamelCase__ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , )
UpperCamelCase__ = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE )
processor.save_pretrained(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = WavaVecaForCTC(SCREAMING_SNAKE_CASE )
else:
UpperCamelCase__ = WavaVecaForPreTraining(SCREAMING_SNAKE_CASE )
if is_finetuned or is_seq_class:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
UpperCamelCase__ = argparse.Namespace(task='audio_pretraining' )
UpperCamelCase__ = fairseq.tasks.setup_task(SCREAMING_SNAKE_CASE )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=SCREAMING_SNAKE_CASE )
UpperCamelCase__ = model[0].eval()
recursively_load_weights(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , not is_finetuned )
hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
parser.add_argument(
"""--is_seq_class""",
action="""store_true""",
help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 20 |
"""simple docstring"""
def exchange_sort(numbers ) -> list[int]:
    """simple docstring"""
    numbers_length = len(numbers )
    for i in range(numbers_length ):
        for j in range(i + 1 , numbers_length ):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
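# Quick illustration (added): exchange_sort([5, 4, 3, 2, 1]) returns [1, 2, 3, 4, 5].
# The nested loops compare every pair (i, j) with j > i and swap out-of-order values,
# so the sort runs in O(n^2) time and sorts the list in place.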
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(exchange_sort(unsorted))
| 20 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self ) -> None:
        super().setUp()
        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
    def get_tokenizer(self , **kwargs ) -> MgpstrTokenizer:
        return MgpstrTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts(self , tokenizer ) -> tuple:
        input_text = 'tester'
        output_text = 'tester'
        return input_text, output_text
    @unittest.skip('MGP-STR always lower cases letters.' )
    def test_added_tokens_do_lower_case(self ) -> None:
        pass
    def test_add_special_tokens(self ) -> None:
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            with self.subTest(F'{tokenizer.__class__.__name__}' ):
                special_token = '[SPECIAL_TOKEN]'
                tokenizer.add_special_tokens({'cls_token': special_token} )
                encoded_special_token = tokenizer.encode([special_token] , add_special_tokens=False )
                self.assertEqual(len(encoded_special_token ) , 1 )
                decoded = tokenizer.decode(encoded_special_token , skip_special_tokens=True )
                self.assertTrue(special_token not in decoded )
    def test_internal_consistency(self ) -> None:
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'{tokenizer.__class__.__name__}' ):
                input_text, output_text = self.get_input_output_texts(tokenizer )
                tokens = tokenizer.tokenize(input_text )
                ids = tokenizer.convert_tokens_to_ids(tokens )
                ids_2 = tokenizer.encode(input_text , add_special_tokens=False )
                self.assertListEqual(ids , ids_2 )
                tokens_2 = tokenizer.convert_ids_to_tokens(ids )
                self.assertNotEqual(len(tokens_2 ) , 0 )
                text_2 = tokenizer.decode(ids )
                self.assertIsInstance(text_2 , str )
                self.assertEqual(text_2.replace(' ' , '' ) , output_text )
    @unittest.skip('MGP-STR tokenizer only handles one sequence.' )
    def test_maximum_encoding_length_pair_input(self ) -> None:
        pass
    @unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer' )
    def test_pretokenized_inputs(self ) -> None:
        pass
| 20 |
"""simple docstring"""
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
    PIL_INTERPOLATION = {
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def pt_to_pil(images):
    """simple docstring"""
    images = (images / 2 + 0.5).clamp(0 , 1 )
    images = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
    images = numpy_to_pil(images )
    return images
def numpy_to_pil(images):
    """simple docstring"""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype('uint8' )
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze() , mode='L' ) for image in images]
    else:
        pil_images = [Image.fromarray(image ) for image in images]
    return pil_images
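# Minimal usage sketch (added; illustrative only — this module never imports torch itself,
# it just calls tensor methods on whatever it is handed):
#     import torch
#     fake_sample = torch.rand(2, 3, 64, 64) * 2 - 1   # model output scaled to [-1, 1]
#     pil_images = pt_to_pil(fake_sample)              # -> list of 2 PIL.Image.Image objects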
| 20 | 1 |
"""simple docstring"""
import requests
APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"
def current_weather(q: str = "Chicago" , appid: str = APPID ) -> dict:
    """simple docstring"""
    return requests.get(URL_BASE + 'weather' , params=locals() ).json()
def weather_forecast(q: str = "Kolkata, India" , appid: str = APPID ) -> dict:
    """simple docstring"""
    return requests.get(URL_BASE + 'forecast' , params=locals() ).json()
def weather_onecall(lat: float = 55.68 , lon: float = 12.57 , appid: str = APPID ) -> dict:
    """simple docstring"""
    return requests.get(URL_BASE + 'onecall' , params=locals() ).json()
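# Note (added for clarity): `params=locals()` turns each function's *parameter names* into
# the query-string keys, so `q`, `appid`, `lat` and `lon` above must match what the
# OpenWeatherMap API expects. For example, current_weather("London") issues
#     GET https://api.openweathermap.org/data/2.5/weather?q=London&appid=<APPID>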
if __name__ == "__main__":
from pprint import pprint
while True:
        location = input("Enter a location:").strip()
if location:
pprint(current_weather(location))
else:
break
| 20 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
}
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""t5-small""": 5_12,
"""t5-base""": 5_12,
"""t5-large""": 5_12,
"""t5-3b""": 5_12,
"""t5-11b""": 5_12,
}
SPIECE_UNDERLINE = "▁"
class T5Tokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , snake_case_ , snake_case_="</s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_=100 , snake_case_=None , snake_case_ = None , snake_case_=True , **snake_case_ , ) -> None:
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
UpperCamelCase__ = [F'<extra_id_{i}>' for i in range(snake_case_ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
UpperCamelCase__ = len(set(filter(lambda snake_case_ : bool('extra_id' in str(snake_case_ ) ) , snake_case_ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
' tokens' )
if legacy:
logger.warning_once(
F'You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'
' read the related pull request available at https://github.com/huggingface/transformers/pull/24565' )
UpperCamelCase__ = legacy
UpperCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , extra_ids=snake_case_ , additional_special_tokens=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , legacy=snake_case_ , **snake_case_ , )
UpperCamelCase__ = vocab_file
UpperCamelCase__ = extra_ids
UpperCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case_ )
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case_ , snake_case_ , snake_case_ ) -> Optional[Any]:
        if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'This tokenizer was incorrectly instantiated with a model max length of'
F' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
F' {pretrained_model_name_or_path} automatically truncating your input to'
F' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
F' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
' instantiate this tokenizer with `model_max_length` set to your preferred value.' , snake_case_ , )
return max_model_length
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
return self.sp_model.get_piece_size() + self._extra_ids
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = {self.convert_ids_to_tokens(snake_case_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None , snake_case_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(snake_case_ )) + [1]
return ([0] * len(snake_case_ )) + [1] + ([0] * len(snake_case_ )) + [1]
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
return list(
set(filter(lambda snake_case_ : bool(re.search(r'<extra_id_\d+>' , snake_case_ ) ) is not None , self.additional_special_tokens ) ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
return [self._convert_token_to_id(snake_case_ ) for token in self.get_sentinel_tokens()]
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> List[int]:
if len(snake_case_ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
' eos tokens being added.' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None ) -> List[int]:
UpperCamelCase__ = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None ) -> List[int]:
UpperCamelCase__ = self._add_eos_if_not_present(snake_case_ )
if token_ids_a is None:
return token_ids_a
else:
UpperCamelCase__ = self._add_eos_if_not_present(snake_case_ )
return token_ids_a + token_ids_a
def __getstate__( self ) -> str:
UpperCamelCase__ = self.__dict__.copy()
UpperCamelCase__ = None
return state
def __setstate__( self , snake_case_ ) -> Any:
UpperCamelCase__ = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
UpperCamelCase__ = {}
UpperCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , **snake_case_ ) -> List[str]:
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
UpperCamelCase__ = SPIECE_UNDERLINE + text.replace(snake_case_ , ' ' )
return super().tokenize(snake_case_ , **snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , **snake_case_ ) -> List[Any]:
if not self.legacy:
UpperCamelCase__ = text.startswith(snake_case_ )
if is_first:
UpperCamelCase__ = text[1:]
UpperCamelCase__ = self.sp_model.encode(snake_case_ , out_type=snake_case_ )
if not self.legacy and not is_first and not text.startswith(' ' ) and tokens[0].startswith(snake_case_ ):
UpperCamelCase__ = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> List[str]:
if token.startswith('<extra_id_' ):
UpperCamelCase__ = re.match(r'<extra_id_(\d+)>' , snake_case_ )
UpperCamelCase__ = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Optional[int]:
if index < self.sp_model.get_piece_size():
UpperCamelCase__ = self.sp_model.IdToPiece(snake_case_ )
else:
UpperCamelCase__ = F'<extra_id_{self.vocab_size - 1 - index}>'
return token
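    # Illustrative note (added, not in the original file): sentinel tokens occupy the *end*
    # of the vocabulary. With the usual T5 setup (a 32000-piece sp_model plus 100 extra ids,
    # so vocab_size == 32100), "<extra_id_0>" maps to id 32099 and "<extra_id_99>" to id
    # 32000, matching the `self.vocab_size - num - 1` arithmetic above.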
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> List[str]:
UpperCamelCase__ = []
UpperCamelCase__ = ''
UpperCamelCase__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(snake_case_ ) + token
UpperCamelCase__ = True
UpperCamelCase__ = []
else:
current_sub_tokens.append(snake_case_ )
UpperCamelCase__ = False
out_string += self.sp_model.decode(snake_case_ )
return out_string.strip()
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]:
if not os.path.isdir(snake_case_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
UpperCamelCase__ = os.path.join(
snake_case_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case_ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case_ , 'wb' ) as fi:
UpperCamelCase__ = self.sp_model.serialized_model_proto()
fi.write(snake_case_ )
return (out_vocab_file,)
| 20 | 1 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
    def __init__(self , image_processor=None , tokenizer=None , **kwargs ) -> None:
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor , tokenizer )
def __call__( self , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = True , snake_case_ = False , snake_case_ = None , snake_case_ = None , snake_case_ = 0 , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = False , snake_case_ = False , snake_case_ = False , snake_case_ = False , snake_case_ = True , snake_case_ = None , **snake_case_ , ) -> BatchEncoding:
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'You cannot provide bounding boxes '
'if you initialized the image processor with apply_ocr set to True.' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.' )
# first, apply the image processor
UpperCamelCase__ = self.image_processor(images=snake_case_ , return_tensors=snake_case_ )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(snake_case_ , snake_case_ ):
UpperCamelCase__ = [text] # add batch dimension (as the image processor always adds a batch dimension)
UpperCamelCase__ = features['words']
UpperCamelCase__ = self.tokenizer(
text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=snake_case_ , add_special_tokens=snake_case_ , padding=snake_case_ , truncation=snake_case_ , max_length=snake_case_ , stride=snake_case_ , pad_to_multiple_of=snake_case_ , return_token_type_ids=snake_case_ , return_attention_mask=snake_case_ , return_overflowing_tokens=snake_case_ , return_special_tokens_mask=snake_case_ , return_offsets_mapping=snake_case_ , return_length=snake_case_ , verbose=snake_case_ , return_tensors=snake_case_ , **snake_case_ , )
# add pixel values
UpperCamelCase__ = features.pop('pixel_values' )
if return_overflowing_tokens is True:
UpperCamelCase__ = self.get_overflowing_images(snake_case_ , encoded_inputs['overflow_to_sample_mapping'] )
UpperCamelCase__ = images
return encoded_inputs
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> int:
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
UpperCamelCase__ = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(snake_case_ ) != len(snake_case_ ):
raise ValueError(
'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
F' {len(snake_case_ )} and {len(snake_case_ )}' )
return images_with_overflow
def SCREAMING_SNAKE_CASE__ ( self , *snake_case_ , **snake_case_ ) -> int:
return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , *snake_case_ , **snake_case_ ) -> Optional[Any]:
return self.tokenizer.decode(*snake_case_ , **snake_case_ )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , snake_case_ , )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , snake_case_ , )
return self.image_processor
| 20 |
"""simple docstring"""
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar("T")
class DisjointSetTreeNode(Generic[T] ):
    def __init__(self , data ) -> None:
        self.data = data
        self.parent = self
        self.rank = 0
class DisjointSetTree(Generic[T] ):
    def __init__(self ) -> None:
        # map from node name to the node object
        self.map = {}
    def make_set(self , data ) -> None:
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data )
    def find_set(self , data ) -> DisjointSetTreeNode[T]:
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data )
        return elem_ref.parent
    def link(self , node1 , node2 ) -> None:
        # helper function for union operation (union by rank)
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1
    def union(self , data1 , data2 ) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(data1 ) , self.find_set(data2 ) )
class GraphUndirectedWeighted(Generic[T] ):
    def __init__(self ) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections = {}
    def add_node(self , node ) -> None:
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}
    def add_edge(self , node1 , node2 , weight ) -> None:
        # add an edge with the given weight
        self.add_node(node1 )
        self.add_node(node2 )
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
    def kruskal(self ) -> GraphUndirectedWeighted[T]:
        # Kruskal's algorithm: sort edges by weight, then greedily keep any edge
        # whose endpoints are still in different disjoint sets
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start) )
                    edges.append((start, end, self.connections[start][end]) )
        edges.sort(key=lambda x : x[2] )
        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node )
        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections ) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u )
            parent_v = disjoint_set.find_set(v )
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u , v , w )
                disjoint_set.union(u , v )
        return graph
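# Illustrative usage (added, not part of the original module):
#     g = GraphUndirectedWeighted[int]()
#     g.add_edge(1, 2, 1)
#     g.add_edge(2, 3, 2)
#     g.add_edge(1, 3, 10)
#     mst = g.kruskal()
#     # mst.connections keeps only the edges (1, 2) and (2, 3): total weight 3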
| 20 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar("T")
U = TypeVar("U")
class DoubleLinkedListNode(Generic[T, U] ):
    def __init__(self , key , val ) -> None:
        self.key = key
        self.val = val
        self.next = None
        self.prev = None
    def __repr__(self ) -> str:
        return (
            F'Node: key: {self.key}, val: {self.val}, '
            F'has next: {bool(self.next )}, has prev: {bool(self.prev )}'
        )
class DoubleLinkedList(Generic[T, U] ):
    def __init__(self ) -> None:
        # sentinel head and rear nodes bracket the real entries
        self.head = DoubleLinkedListNode(None , None )
        self.rear = DoubleLinkedListNode(None , None )
        self.head.next, self.rear.prev = self.rear, self.head
    def __repr__(self ) -> str:
        rep = ['DoubleLinkedList']
        node = self.head
        while node.next is not None:
            rep.append(str(node ) )
            node = node.next
        rep.append(str(self.rear ) )
        return ",\n ".join(rep )
    def add(self , node ) -> None:
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear
    def remove(self , node ) -> DoubleLinkedListNode[T, U] | None:
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node
class LRUCache(Generic[T, U] ):
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}
    def __init__(self , capacity ) -> None:
        self.list = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache = {}
    def __repr__(self ) -> str:
        return (
            F'CacheInfo(hits={self.hits}, misses={self.miss}, '
            F'capacity={self.capacity}, current size={self.num_keys})'
        )
    def __contains__(self , key ) -> bool:
        return key in self.cache
    def get(self , key ) -> U | None:
        # Note: pythonic interface would throw KeyError rather than return None
        if key in self.cache:
            self.hits += 1
            value_node = self.cache[key]
            node = self.list.remove(self.cache[key] )
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node )
            return node.val
        self.miss += 1
        return None
    def put(self , key , value ) -> None:
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node ) is not None
                )  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key , value )
            self.list.add(self.cache[key] )
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key] )
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node )
    @classmethod
    def decorator(cls , size = 128 ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        def cache_decorator_inner(func ) -> Callable[..., U]:
            def cache_decorator_wrapper(*args ) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size )
                result = cls.decorator_function_to_instance_map[func].get(args[0] )
                if result is None:
                    result = func(*args )
                    cls.decorator_function_to_instance_map[func].put(args[0] , result )
                return result
            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]
            setattr(cache_decorator_wrapper , 'cache_info' , cache_info )  # noqa: B010
            return cache_decorator_wrapper
        return cache_decorator_inner
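# Illustrative use of the decorator (an added example, not part of the original file):
#     @LRUCache.decorator(100)
#     def fib(num: int) -> int:
#         return num if num < 2 else fib(num - 1) + fib(num - 2)
#     fib(30)
#     print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, current size=...)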
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 |
"""simple docstring"""
__author__ = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation ) -> int:
    """simple docstring"""
    operators = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i ) )
        elif i in operators:
            # RULE 2
            operator_stack.push(i )
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num1 , num2 )
            operand_stack.push(total )
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(F"{equation} = {dijkstras_two_stack_algorithm(equation)}")
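# Worked trace for the example above (added for clarity):
#   (2 + 3)   -> 5
#   (4 * 2)   -> 8
#   (8 * 5)   -> 40
#   (5 + 40)  -> 45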
| 20 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
A__ : Union[str, Any]= False
if is_vision_available():
from PIL import Image
    from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester(unittest.TestCase ):
    def __init__(self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , size=None , do_normalize=True , do_convert_rgb=True , patch_size=None , ) -> None:
        size = size if size is not None else {'height': 20, 'width': 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {'height': 16, 'width': 16}
    def prepare_image_processor_dict(self ) -> dict:
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
    def prepare_dummy_image(self ):
        img_url = 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'
        raw_image = Image.open(requests.get(img_url , stream=True ).raw ).convert('RGB' )
        return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None
    def setUp(self ) -> None:
        self.image_processor_tester = Pix2StructImageProcessingTester(self )
    @property
    def image_processor_dict(self ) -> dict:
        return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case_ , 'do_normalize' ) )
self.assertTrue(hasattr(snake_case_ , 'do_convert_rgb' ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.image_processor_tester.prepare_dummy_image()
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
UpperCamelCase__ = 2048
UpperCamelCase__ = image_processor(snake_case_ , return_tensors='pt' , max_patches=snake_case_ )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0_606 ) , atol=1E-3 , rtol=1E-3 ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
# Initialize image_processor
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , Image.Image )
# Test not batched input
UpperCamelCase__ = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCamelCase__ = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=snake_case_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase__ = image_processor(
snake_case_ , return_tensors='pt' , max_patches=snake_case_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
# Initialize image_processor
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , Image.Image )
# Test not batched input
UpperCamelCase__ = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
UpperCamelCase__ = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(snake_case_ ):
UpperCamelCase__ = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=snake_case_ ).flattened_patches
UpperCamelCase__ = 'Hello'
UpperCamelCase__ = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=snake_case_ , header_text=snake_case_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase__ = image_processor(
snake_case_ , return_tensors='pt' , max_patches=snake_case_ , header_text=snake_case_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
# Initialize image_processor
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ , numpify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , np.ndarray )
UpperCamelCase__ = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCamelCase__ = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=snake_case_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase__ = image_processor(
snake_case_ , return_tensors='pt' , max_patches=snake_case_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
# Initialize image_processor
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ , torchify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , torch.Tensor )
# Test not batched input
UpperCamelCase__ = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCamelCase__ = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=snake_case_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase__ = image_processor(
snake_case_ , return_tensors='pt' , max_patches=snake_case_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class __lowerCamelCase ( _a , unittest.TestCase ):
a : Optional[Any] =PixaStructImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
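        # the tester creates 4-channel (RGBA) images; do_convert_rgb reduces them to 3 channels,
        # which is why the expected hidden dim below uses num_channels - 1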
UpperCamelCase__ = PixaStructImageProcessingTester(self , num_channels=4 )
UpperCamelCase__ = 3
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case_ , 'do_normalize' ) )
self.assertTrue(hasattr(snake_case_ , 'do_convert_rgb' ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
# Initialize image_processor
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , Image.Image )
# Test not batched input
UpperCamelCase__ = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCamelCase__ = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=snake_case_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase__ = image_processor(
snake_case_ , return_tensors='pt' , max_patches=snake_case_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 20 |
"""simple docstring"""
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
A__ : Any= """src/diffusers"""
# Matches is_xxx_available()
A__ : Tuple= re.compile(r"""is\_([a-z_]*)_available\(\)""")
# Matches from xxx import bla
A__ : Any= re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
A__ : Optional[Any]= """
{0} = None
"""
A__ : List[Any]= """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
"""
A__ : Dict= """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ = _re_backend.findall(SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) == 0:
return None
return "_and_".join(SCREAMING_SNAKE_CASE )
def lowerCAmelCase_( ) -> str:
"""simple docstring"""
with open(os.path.join(SCREAMING_SNAKE_CASE , '__init__.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
UpperCamelCase__ = f.readlines()
# Get to the point we do the actual imports for type checking
UpperCamelCase__ = 0
UpperCamelCase__ = {}
# Go through the end of the file
while line_index < len(SCREAMING_SNAKE_CASE ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
UpperCamelCase__ = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith('else:' ):
line_index += 1
line_index += 1
UpperCamelCase__ = []
# Until we unindent, add backend objects to the list
while line_index < len(SCREAMING_SNAKE_CASE ) and len(lines[line_index] ) > 1:
UpperCamelCase__ = lines[line_index]
UpperCamelCase__ = _re_single_line_import.search(SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(SCREAMING_SNAKE_CASE ) > 0:
UpperCamelCase__ = objects
else:
line_index += 1
return backend_specific_objects
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
if name.isupper():
return DUMMY_CONSTANT.format(SCREAMING_SNAKE_CASE )
elif name.islower():
return DUMMY_FUNCTION.format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
return DUMMY_CLASS.format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE=None ) -> int:
"""simple docstring"""
if backend_specific_objects is None:
UpperCamelCase__ = read_init()
    # Build the dummy-file content for each backend; backend names map onto the modules checked by requires_backends
UpperCamelCase__ = {}
for backend, objects in backend_specific_objects.items():
UpperCamelCase__ = '[' + ', '.join(F'"{b}"' for b in backend.split('_and_' ) ) + ']'
UpperCamelCase__ = '# This file is autogenerated by the command `make fix-copies`, do not edit.\n'
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for o in objects] )
UpperCamelCase__ = dummy_file
return dummy_files
def lowerCAmelCase_( SCREAMING_SNAKE_CASE=False ) -> int:
"""simple docstring"""
UpperCamelCase__ = create_dummy_files()
# For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
UpperCamelCase__ = {'torch': 'pt'}
# Locate actual dummy modules and read their content.
UpperCamelCase__ = os.path.join(SCREAMING_SNAKE_CASE , 'utils' )
UpperCamelCase__ = {
backend: os.path.join(SCREAMING_SNAKE_CASE , F'dummy_{short_names.get(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}_objects.py' )
for backend in dummy_files.keys()
}
UpperCamelCase__ = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(SCREAMING_SNAKE_CASE ):
with open(SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' , newline='\n' ) as f:
UpperCamelCase__ = f.read()
else:
UpperCamelCase__ = ''
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F'Updating diffusers.utils.dummy_{short_names.get(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}_objects.py as the main '
'__init__ has new objects.' )
with open(dummy_file_paths[backend] , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
'The main __init__ has objects that are not present in '
F'diffusers.utils.dummy_{short_names.get(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}_objects.py. Run `make fix-copies` '
'to fix this.' )
if __name__ == "__main__":
A__ : Any= argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
A__ : Optional[int]= parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 20 | 1 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class __lowerCamelCase ( unittest.TestCase ):
def __init__( self , snake_case_ , snake_case_=100 , snake_case_=13 , snake_case_=30 , snake_case_=2 , snake_case_=3 , snake_case_=True , snake_case_=True , snake_case_=32 , snake_case_=5 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=10 , snake_case_=0.02 , snake_case_=3 , ) -> Optional[int]:
UpperCamelCase__ = parent
UpperCamelCase__ = vocab_size
UpperCamelCase__ = batch_size
UpperCamelCase__ = image_size
UpperCamelCase__ = patch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = is_training
UpperCamelCase__ = use_labels
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = type_sequence_label_size
UpperCamelCase__ = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase__ = (image_size // patch_size) ** 2
UpperCamelCase__ = num_patches + 1
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ = BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case_ , initializer_range=self.initializer_range , )
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> List[Any]:
UpperCamelCase__ = FlaxBeitModel(config=snake_case_ )
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> List[Any]:
UpperCamelCase__ = FlaxBeitForMaskedImageModeling(config=snake_case_ )
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> List[str]:
UpperCamelCase__ = self.type_sequence_label_size
UpperCamelCase__ = FlaxBeitForImageClassification(config=snake_case_ )
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase__ = 1
UpperCamelCase__ = FlaxBeitForImageClassification(snake_case_ )
UpperCamelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase__ = model(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.prepare_config_and_inputs()
        UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = config_and_inputs
UpperCamelCase__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class __lowerCamelCase ( _a , unittest.TestCase ):
a : int =(
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
def SCREAMING_SNAKE_CASE__ ( self ) -> None:
UpperCamelCase__ = FlaxBeitModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ = [*signature.parameters.keys()]
UpperCamelCase__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCamelCase__ = self._prepare_for_class(snake_case_ , snake_case_ )
UpperCamelCase__ = model_class(snake_case_ )
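                # run the same inputs through a jitted and an eager forward pass; the
                # outputs must match in count and shape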
@jax.jit
def model_jitted(snake_case_ , **snake_case_ ):
return model(pixel_values=snake_case_ , **snake_case_ )
with self.subTest('JIT Enabled' ):
UpperCamelCase__ = model_jitted(**snake_case_ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
UpperCamelCase__ = model_jitted(**snake_case_ ).to_tuple()
self.assertEqual(len(snake_case_ ) , len(snake_case_ ) )
for jitted_output, output in zip(snake_case_ , snake_case_ ):
self.assertEqual(jitted_output.shape , output.shape )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
for model_class_name in self.all_model_classes:
UpperCamelCase__ = model_class_name.from_pretrained('microsoft/beit-base-patch16-224' )
UpperCamelCase__ = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(snake_case_ )
def lowerCAmelCase_( ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = FlaxBeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=snake_case_ , return_tensors='np' ).pixel_values
# prepare bool_masked_pos
UpperCamelCase__ = np.ones((1, 196) , dtype=snake_case_ )
# forward pass
UpperCamelCase__ = model(pixel_values=snake_case_ , bool_masked_pos=snake_case_ )
UpperCamelCase__ = outputs.logits
# verify the logits
UpperCamelCase__ = (1, 196, 8192)
self.assertEqual(logits.shape , snake_case_ )
UpperCamelCase__ = np.array(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , snake_case_ , atol=1E-2 ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=snake_case_ , return_tensors='np' )
# forward pass
UpperCamelCase__ = model(**snake_case_ )
UpperCamelCase__ = outputs.logits
# verify the logits
UpperCamelCase__ = (1, 1000)
self.assertEqual(logits.shape , snake_case_ )
UpperCamelCase__ = np.array([-1.2_385, -1.0_987, -1.0_108] )
self.assertTrue(np.allclose(logits[0, :3] , snake_case_ , atol=1E-4 ) )
UpperCamelCase__ = 281
self.assertEqual(logits.argmax(-1 ).item() , snake_case_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=snake_case_ , return_tensors='np' )
# forward pass
UpperCamelCase__ = model(**snake_case_ )
UpperCamelCase__ = outputs.logits
# verify the logits
UpperCamelCase__ = (1, 2_1841)
self.assertEqual(logits.shape , snake_case_ )
UpperCamelCase__ = np.array([1.6_881, -0.2_787, 0.5_901] )
self.assertTrue(np.allclose(logits[0, :3] , snake_case_ , atol=1E-4 ) )
UpperCamelCase__ = 2396
self.assertEqual(logits.argmax(-1 ).item() , snake_case_ )
| 20 |
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
A__ : Optional[Any]= """Run commands across TPU VMs for initial setup before running `accelerate launch`."""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE=None ) -> Dict:
"""simple docstring"""
if subparsers is not None:
UpperCamelCase__ = subparsers.add_parser('tpu-config' , description=_description )
else:
UpperCamelCase__ = argparse.ArgumentParser('Accelerate tpu-config command' , description=_description )
# Core arguments
UpperCamelCase__ = parser.add_argument_group(
'Config Arguments' , 'Arguments that can be configured through `accelerate config`.' )
config_args.add_argument(
'--config_file' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='Path to the config file to use for accelerate.' , )
config_args.add_argument(
'--tpu_name' , default=SCREAMING_SNAKE_CASE , help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.' , )
config_args.add_argument(
'--tpu_zone' , default=SCREAMING_SNAKE_CASE , help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.' , )
UpperCamelCase__ = parser.add_argument_group('TPU Arguments' , 'Arguments for options ran inside the TPU.' )
pod_args.add_argument(
'--use_alpha' , action='store_true' , help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.' , )
pod_args.add_argument(
'--command_file' , default=SCREAMING_SNAKE_CASE , help='The path to the file containing the commands to run on the pod on startup.' , )
pod_args.add_argument(
'--command' , action='append' , nargs='+' , help='A command to run on the pod. Can be passed multiple times.' , )
pod_args.add_argument(
'--install_accelerate' , action='store_true' , help='Whether to install accelerate on the pod. Defaults to False.' , )
pod_args.add_argument(
'--accelerate_version' , default='latest' , help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.' , )
pod_args.add_argument(
'--debug' , action='store_true' , help='If set, will print the command that would be run instead of running it.' )
if subparsers is not None:
parser.set_defaults(func=SCREAMING_SNAKE_CASE )
return parser
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
UpperCamelCase__ = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
UpperCamelCase__ = defaults.command_file
if not args.command and defaults.commands is not None:
UpperCamelCase__ = defaults.commands
if not args.tpu_name:
UpperCamelCase__ = defaults.tpu_name
if not args.tpu_zone:
UpperCamelCase__ = defaults.tpu_zone
if args.accelerate_version == "dev":
UpperCamelCase__ = 'git+https://github.com/huggingface/accelerate.git'
elif args.accelerate_version == "latest":
UpperCamelCase__ = 'accelerate -U'
elif isinstance(parse(args.accelerate_version ) , SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = F'accelerate=={args.accelerate_version}'
if not args.command_file and not args.command:
raise ValueError('You must specify either a command file or a command to run on the pod.' )
if args.command_file:
with open(args.command_file , 'r' ) as f:
UpperCamelCase__ = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
UpperCamelCase__ = ['cd /usr/share']
if args.install_accelerate:
new_cmd += [F'pip install {args.accelerate_version}']
new_cmd += args.command
UpperCamelCase__ = '; '.join(SCREAMING_SNAKE_CASE )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
UpperCamelCase__ = ['gcloud']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(F'Running {" ".join(SCREAMING_SNAKE_CASE )}' )
return
subprocess.run(SCREAMING_SNAKE_CASE )
print('Successfully setup pod.' )
def lowerCAmelCase_( ) -> int:
"""simple docstring"""
UpperCamelCase__ = tpu_command_parser()
UpperCamelCase__ = parser.parse_args()
tpu_command_launcher(SCREAMING_SNAKE_CASE )
| 20 | 1 |
"""simple docstring"""
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __lowerCamelCase ( _a , _a , _a ):
@register_to_config
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = False , ) -> Optional[int]:
super().__init__()
UpperCamelCase__ = nn.Embedding(snake_case_ , snake_case_ )
UpperCamelCase__ = nn.Embedding(snake_case_ , snake_case_ )
UpperCamelCase__ = False
UpperCamelCase__ = nn.Dropout(p=snake_case_ )
UpperCamelCase__ = TaConfig(
vocab_size=snake_case_ , d_model=snake_case_ , num_heads=snake_case_ , d_kv=snake_case_ , d_ff=snake_case_ , dropout_rate=snake_case_ , feed_forward_proj=snake_case_ , is_decoder=snake_case_ , is_encoder_decoder=snake_case_ , )
UpperCamelCase__ = nn.ModuleList()
for lyr_num in range(snake_case_ ):
UpperCamelCase__ = TaBlock(snake_case_ )
self.encoders.append(snake_case_ )
UpperCamelCase__ = TaLayerNorm(snake_case_ )
UpperCamelCase__ = nn.Dropout(p=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> Optional[int]:
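        # embed the tokens, add learned position embeddings, run the T5 encoder
        # blocks with an extended attention mask, then apply the final layer norm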
UpperCamelCase__ = self.token_embedder(snake_case_ )
UpperCamelCase__ = encoder_input_tokens.shape[1]
UpperCamelCase__ = torch.arange(snake_case_ , device=encoder_input_tokens.device )
x += self.position_encoding(snake_case_ )
UpperCamelCase__ = self.dropout_pre(snake_case_ )
        # invert the attention mask
UpperCamelCase__ = encoder_input_tokens.size()
UpperCamelCase__ = self.get_extended_attention_mask(snake_case_ , snake_case_ )
for lyr in self.encoders:
UpperCamelCase__ = lyr(snake_case_ , snake_case_ )[0]
UpperCamelCase__ = self.layer_norm(snake_case_ )
return self.dropout_post(snake_case_ ), encoder_inputs_mask
| 20 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : List[str]= logging.get_logger(__name__)
class __lowerCamelCase ( _a ):
a : Optional[int] ="""timm_backbone"""
def __init__( self , snake_case_=None , snake_case_=3 , snake_case_=True , snake_case_=True , snake_case_=None , **snake_case_ , ) -> Dict:
super().__init__(**snake_case_ )
UpperCamelCase__ = backbone
UpperCamelCase__ = num_channels
UpperCamelCase__ = features_only
UpperCamelCase__ = use_pretrained_backbone
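        # hard-coded: this config always routes through a timm backbone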
UpperCamelCase__ = True
UpperCamelCase__ = out_indices if out_indices is not None else (-1,)
| 20 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __lowerCamelCase ( _a ):
a : Tuple ="""dandelin/vilt-b32-finetuned-vqa"""
a : str =(
"""This is a tool that answers a question about an image. It takes an input named `image` which should be the """
"""image containing the information, as well as a `question` which should be the question in English. It """
"""returns a text that is the answer to the question."""
)
a : Optional[Any] ="""image_qa"""
a : Dict =AutoProcessor
a : int =AutoModelForVisualQuestionAnswering
a : str =["""image""", """text"""]
a : List[Any] =["""text"""]
def __init__( self , *snake_case_ , **snake_case_ ) -> Optional[Any]:
requires_backends(self , ['vision'] )
super().__init__(*snake_case_ , **snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> Any:
return self.pre_processor(snake_case_ , snake_case_ , return_tensors='pt' )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Dict:
with torch.no_grad():
return self.model(**snake_case_ ).logits
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> List[Any]:
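        # argmax over the answer logits, mapped back to its label string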
UpperCamelCase__ = outputs.argmax(-1 ).item()
return self.model.config.idalabel[idx]
| 20 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
A__ : Any= logging.get_logger(__name__)
A__ : str= {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class __lowerCamelCase ( _a ):
a : List[str] ="""layoutlmv3"""
def __init__( self , snake_case_=5_0265 , snake_case_=768 , snake_case_=12 , snake_case_=12 , snake_case_=3072 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=2 , snake_case_=0.02 , snake_case_=1E-5 , snake_case_=1 , snake_case_=0 , snake_case_=2 , snake_case_=1024 , snake_case_=128 , snake_case_=128 , snake_case_=True , snake_case_=32 , snake_case_=128 , snake_case_=64 , snake_case_=256 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=224 , snake_case_=3 , snake_case_=16 , snake_case_=None , **snake_case_ , ) -> Union[str, Any]:
super().__init__(
vocab_size=snake_case_ , hidden_size=snake_case_ , num_hidden_layers=snake_case_ , num_attention_heads=snake_case_ , intermediate_size=snake_case_ , hidden_act=snake_case_ , hidden_dropout_prob=snake_case_ , attention_probs_dropout_prob=snake_case_ , max_position_embeddings=snake_case_ , type_vocab_size=snake_case_ , initializer_range=snake_case_ , layer_norm_eps=snake_case_ , pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_ , )
UpperCamelCase__ = max_ad_position_embeddings
UpperCamelCase__ = coordinate_size
UpperCamelCase__ = shape_size
UpperCamelCase__ = has_relative_attention_bias
UpperCamelCase__ = rel_pos_bins
UpperCamelCase__ = max_rel_pos
UpperCamelCase__ = has_spatial_attention_bias
UpperCamelCase__ = rel_ad_pos_bins
UpperCamelCase__ = max_rel_ad_pos
UpperCamelCase__ = text_embed
UpperCamelCase__ = visual_embed
UpperCamelCase__ = input_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = patch_size
UpperCamelCase__ = classifier_dropout
class __lowerCamelCase ( _a ):
a : Tuple =version.parse("""1.12""" )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Mapping[str, Mapping[int, str]]:
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
else:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels'}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> float:
return 1E-5
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
return 12
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = -1 , snake_case_ = -1 , snake_case_ = False , snake_case_ = None , snake_case_ = 3 , snake_case_ = 40 , snake_case_ = 40 , ) -> Mapping[str, Any]:
setattr(processor.image_processor , 'apply_ocr' , snake_case_ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCamelCase__ = compute_effective_axis_dimension(
snake_case_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCamelCase__ = processor.tokenizer.num_special_tokens_to_add(snake_case_ )
UpperCamelCase__ = compute_effective_axis_dimension(
snake_case_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=snake_case_ )
# Generate dummy inputs according to compute batch and sequence
UpperCamelCase__ = [[' '.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
UpperCamelCase__ = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
UpperCamelCase__ = self._generate_dummy_images(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
UpperCamelCase__ = dict(
processor(
snake_case_ , text=snake_case_ , boxes=snake_case_ , return_tensors=snake_case_ , ) )
return inputs
| 20 | 1 |
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __lowerCamelCase ( _a ):
a : str =(UnCLIPScheduler,)
def SCREAMING_SNAKE_CASE__ ( self , **snake_case_ ) -> Tuple:
UpperCamelCase__ = {
'num_train_timesteps': 1000,
'variance_type': 'fixed_small_log',
'clip_sample': True,
'clip_sample_range': 1.0,
'prediction_type': 'epsilon',
}
config.update(**snake_case_ )
return config
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=snake_case_ , prev_timestep=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config(variance_type='fixed_small_log' )
UpperCamelCase__ = scheduler_class(**snake_case_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1E-5
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config(variance_type='learned_range' )
UpperCamelCase__ = scheduler_class(**snake_case_ )
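        # with variance_type "learned_range", the predicted value (0.5 here) interpolates
        # between the scheduler's minimum and maximum log-variance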
UpperCamelCase__ = 0.5
assert scheduler._get_variance(1 , predicted_variance=snake_case_ ) - -10.1_712_790 < 1E-5
assert scheduler._get_variance(487 , predicted_variance=snake_case_ ) - -5.7_998_052 < 1E-5
assert scheduler._get_variance(999 , predicted_variance=snake_case_ ) - -0.0_010_011 < 1E-5
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = scheduler_class(**snake_case_ )
UpperCamelCase__ = scheduler.timesteps
UpperCamelCase__ = self.dummy_model()
UpperCamelCase__ = self.dummy_sample_deter
UpperCamelCase__ = torch.manual_seed(0 )
for i, t in enumerate(snake_case_ ):
# 1. predict noise residual
UpperCamelCase__ = model(snake_case_ , snake_case_ )
# 2. predict previous mean of sample x_t-1
UpperCamelCase__ = scheduler.step(snake_case_ , snake_case_ , snake_case_ , generator=snake_case_ ).prev_sample
UpperCamelCase__ = pred_prev_sample
UpperCamelCase__ = torch.sum(torch.abs(snake_case_ ) )
UpperCamelCase__ = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_sum.item() - 252.2_682_495 ) < 1E-2
assert abs(result_mean.item() - 0.3_284_743 ) < 1E-3
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = scheduler_class(**snake_case_ )
scheduler.set_timesteps(25 )
UpperCamelCase__ = scheduler.timesteps
UpperCamelCase__ = self.dummy_model()
UpperCamelCase__ = self.dummy_sample_deter
UpperCamelCase__ = torch.manual_seed(0 )
for i, t in enumerate(snake_case_ ):
# 1. predict noise residual
UpperCamelCase__ = model(snake_case_ , snake_case_ )
if i + 1 == timesteps.shape[0]:
UpperCamelCase__ = None
else:
UpperCamelCase__ = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
UpperCamelCase__ = scheduler.step(
snake_case_ , snake_case_ , snake_case_ , prev_timestep=snake_case_ , generator=snake_case_ ).prev_sample
UpperCamelCase__ = pred_prev_sample
UpperCamelCase__ = torch.sum(torch.abs(snake_case_ ) )
UpperCamelCase__ = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_sum.item() - 258.2_044_983 ) < 1E-2
assert abs(result_mean.item() - 0.3_362_038 ) < 1E-3
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
pass
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
pass
| 20 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class __lowerCamelCase :
def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=2 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=3 , snake_case_=4 , snake_case_=None , ) -> Tuple:
UpperCamelCase__ = parent
UpperCamelCase__ = 13
UpperCamelCase__ = 7
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = 99
UpperCamelCase__ = 384
UpperCamelCase__ = 2
UpperCamelCase__ = 4
UpperCamelCase__ = 37
UpperCamelCase__ = 'gelu'
UpperCamelCase__ = 0.1
UpperCamelCase__ = 0.1
UpperCamelCase__ = 512
UpperCamelCase__ = 16
UpperCamelCase__ = 2
UpperCamelCase__ = 0.02
UpperCamelCase__ = 3
UpperCamelCase__ = 4
UpperCamelCase__ = 128
UpperCamelCase__ = 2
UpperCamelCase__ = 9
UpperCamelCase__ = 1
UpperCamelCase__ = None
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ = None
if self.use_input_mask:
UpperCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ = None
if self.use_token_type_ids:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase__ = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=snake_case_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int:
UpperCamelCase__ = TFConvBertModel(config=snake_case_ )
UpperCamelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCamelCase__ = [input_ids, input_mask]
UpperCamelCase__ = model(snake_case_ )
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int:
UpperCamelCase__ = TFConvBertForMaskedLM(config=snake_case_ )
UpperCamelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[str]:
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = TFConvBertForSequenceClassification(config=snake_case_ )
UpperCamelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Tuple:
UpperCamelCase__ = self.num_choices
UpperCamelCase__ = TFConvBertForMultipleChoice(config=snake_case_ )
UpperCamelCase__ = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[str]:
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = TFConvBertForTokenClassification(config=snake_case_ )
UpperCamelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[Any]:
UpperCamelCase__ = TFConvBertForQuestionAnswering(config=snake_case_ )
UpperCamelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
UpperCamelCase__ = self.prepare_config_and_inputs()
        UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = config_and_inputs
UpperCamelCase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class __lowerCamelCase ( _a , _a , unittest.TestCase ):
a : Any =(
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
a : str =(
{
"""feature-extraction""": TFConvBertModel,
"""fill-mask""": TFConvBertForMaskedLM,
"""question-answering""": TFConvBertForQuestionAnswering,
"""text-classification""": TFConvBertForSequenceClassification,
"""token-classification""": TFConvBertForTokenClassification,
"""zero-shot""": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
a : Any =False
a : Dict =False
a : str =False
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = TFConvBertModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=snake_case_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = True
UpperCamelCase__ = True
if hasattr(snake_case_ , 'use_cache' ):
UpperCamelCase__ = True
UpperCamelCase__ = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
UpperCamelCase__ = getattr(self.model_tester , 'key_length' , snake_case_ )
for model_class in self.all_model_classes:
UpperCamelCase__ = self._prepare_for_class(snake_case_ , snake_case_ )
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = len(model(snake_case_ ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case_ , saved_model=snake_case_ )
UpperCamelCase__ = os.path.join(snake_case_ , 'saved_model' , '1' )
UpperCamelCase__ = tf.keras.models.load_model(snake_case_ )
UpperCamelCase__ = model(snake_case_ )
if self.is_encoder_decoder:
UpperCamelCase__ = outputs['encoder_hidden_states']
UpperCamelCase__ = outputs['encoder_attentions']
else:
UpperCamelCase__ = outputs['hidden_states']
UpperCamelCase__ = outputs['attentions']
self.assertEqual(len(snake_case_ ) , snake_case_ )
UpperCamelCase__ = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(snake_case_ ) , snake_case_ )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
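        # ConvBERT's mixed attention assigns half of the configured heads to self-attention
        # (head_ratio = 2), hence num_attention_heads / 2 in the expected shape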
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = True
UpperCamelCase__ = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
UpperCamelCase__ = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
UpperCamelCase__ = getattr(self.model_tester , 'key_length' , snake_case_ )
UpperCamelCase__ = getattr(self.model_tester , 'key_length' , snake_case_ )
def check_decoder_attentions_output(snake_case_ ):
UpperCamelCase__ = len(snake_case_ )
self.assertEqual(out_len % 2 , 0 )
UpperCamelCase__ = outputs.decoder_attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(snake_case_ ):
UpperCamelCase__ = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
UpperCamelCase__ = True
UpperCamelCase__ = False
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
UpperCamelCase__ = len(snake_case_ )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
if self.is_encoder_decoder:
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_decoder_attentions_output(snake_case_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
UpperCamelCase__ = True
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
# Check attention is always last and order is fine
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(snake_case_ ) )
self.assertEqual(model.config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
@require_tf
class __lowerCamelCase ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
UpperCamelCase__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCamelCase__ = model(snake_case_ )[0]
UpperCamelCase__ = [1, 6, 768]
self.assertEqual(output.shape , snake_case_ )
UpperCamelCase__ = tf.constant(
[
[
[-0.03_475_493, -0.4_686_034, -0.30_638_832],
[0.22_637_248, -0.26_988_646, -0.7_423_424],
[0.10_324_868, -0.45_013_508, -0.58_280_784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , snake_case_ , atol=1E-4 )
| 20 | 1 |
"""simple docstring"""
from manim import *
class __lowerCamelCase ( _a ):
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
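        # visualizes sharded checkpoint loading: draw CPU/GPU/model memory blocks,
        # then animate the checkpoint weights into the CPU slots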
UpperCamelCase__ = Rectangle(height=0.5 , width=0.5 )
UpperCamelCase__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCamelCase__ = [mem.copy() for i in range(6 )]
UpperCamelCase__ = [mem.copy() for i in range(6 )]
UpperCamelCase__ = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase__ = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase__ = VGroup(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase__ = Text('CPU' , font_size=24 )
UpperCamelCase__ = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(snake_case_ )
UpperCamelCase__ = [mem.copy() for i in range(4 )]
UpperCamelCase__ = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase__ = Text('GPU' , font_size=24 )
UpperCamelCase__ = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
gpu.move_to([-1, -1, 0] )
self.add(snake_case_ )
UpperCamelCase__ = [mem.copy() for i in range(6 )]
UpperCamelCase__ = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase__ = Text('Model' , font_size=24 )
UpperCamelCase__ = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
model.move_to([3, -1.0, 0] )
self.add(snake_case_ )
UpperCamelCase__ = []
for i, rect in enumerate(snake_case_ ):
rect.set_stroke(snake_case_ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
UpperCamelCase__ = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(snake_case_ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=snake_case_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=snake_case_ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=snake_case_ , buff=0.0 )
self.add(snake_case_ )
cpu_targs.append(snake_case_ )
UpperCamelCase__ = [mem.copy() for i in range(6 )]
UpperCamelCase__ = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase__ = Text('Loaded Checkpoint' , font_size=24 )
UpperCamelCase__ = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , aligned_edge=snake_case_ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
UpperCamelCase__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCamelCase__ = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(snake_case_ , snake_case_ )
UpperCamelCase__ = MarkupText(
F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(snake_case_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
UpperCamelCase__ = MarkupText(
F'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(snake_case_ ) , Write(snake_case_ ) )
self.play(Write(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) )
UpperCamelCase__ = []
UpperCamelCase__ = []
for i, rect in enumerate(snake_case_ ):
UpperCamelCase__ = fill.copy().set_fill(snake_case_ , opacity=0.7 )
target.move_to(snake_case_ )
first_animations.append(GrowFromCenter(snake_case_ , run_time=1 ) )
UpperCamelCase__ = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(snake_case_ , run_time=1.5 ) )
self.play(*snake_case_ )
self.play(*snake_case_ )
self.wait()
| 20 |
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def lowerCAmelCase_( SCREAMING_SNAKE_CASE = 1_00_00_00 , SCREAMING_SNAKE_CASE = 10 ) -> int:
"""simple docstring"""
UpperCamelCase__ = defaultdict(SCREAMING_SNAKE_CASE )
for outer_width in range(3 , (t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
UpperCamelCase__ = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
else:
UpperCamelCase__ = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(SCREAMING_SNAKE_CASE , outer_width - 1 , 2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 20 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __lowerCamelCase ( unittest.TestCase ):
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
torch.manual_seed(0 )
UpperCamelCase__ = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = self.dummy_uncond_unet
UpperCamelCase__ = PNDMScheduler()
UpperCamelCase__ = PNDMPipeline(unet=snake_case_ , scheduler=snake_case_ )
pndm.to(snake_case_ )
pndm.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase__ = torch.manual_seed(0 )
UpperCamelCase__ = pndm(generator=snake_case_ , num_inference_steps=20 , output_type='numpy' ).images
UpperCamelCase__ = torch.manual_seed(0 )
UpperCamelCase__ = pndm(generator=snake_case_ , num_inference_steps=20 , output_type='numpy' , return_dict=snake_case_ )[0]
UpperCamelCase__ = image[0, -3:, -3:, -1]
UpperCamelCase__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCamelCase__ = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
UpperCamelCase__ = 'google/ddpm-cifar10-32'
UpperCamelCase__ = UNetaDModel.from_pretrained(snake_case_ )
UpperCamelCase__ = PNDMScheduler()
UpperCamelCase__ = PNDMPipeline(unet=snake_case_ , scheduler=snake_case_ )
pndm.to(snake_case_ )
pndm.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase__ = torch.manual_seed(0 )
UpperCamelCase__ = pndm(generator=snake_case_ , output_type='numpy' ).images
UpperCamelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCamelCase__ = np.array([0.1_564, 0.14_645, 0.1_406, 0.14_715, 0.12_425, 0.14_045, 0.13_115, 0.12_175, 0.125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 20 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester(unittest.TestCase ):
def __init__( self , snake_case_ , snake_case_=100 , snake_case_=13 , snake_case_=30 , snake_case_=2 , snake_case_=3 , snake_case_=True , snake_case_=True , snake_case_=32 , snake_case_=5 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=10 , snake_case_=0.02 , snake_case_=3 , ) -> Optional[int]:
UpperCamelCase__ = parent
UpperCamelCase__ = vocab_size
UpperCamelCase__ = batch_size
UpperCamelCase__ = image_size
UpperCamelCase__ = patch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = is_training
UpperCamelCase__ = use_labels
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = type_sequence_label_size
UpperCamelCase__ = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase__ = (image_size // patch_size) ** 2
UpperCamelCase__ = num_patches + 1
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ = BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case_ , initializer_range=self.initializer_range , )
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> List[Any]:
UpperCamelCase__ = FlaxBeitModel(config=snake_case_ )
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> List[Any]:
UpperCamelCase__ = FlaxBeitForMaskedImageModeling(config=snake_case_ )
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> List[str]:
UpperCamelCase__ = self.type_sequence_label_size
UpperCamelCase__ = FlaxBeitForImageClassification(config=snake_case_ )
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase__ = 1
UpperCamelCase__ = FlaxBeitForImageClassification(snake_case_ )
UpperCamelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase__ = model(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_flax
class __lowerCamelCase ( _a , unittest.TestCase ):
a : int =(
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
def SCREAMING_SNAKE_CASE__ ( self ) -> None:
UpperCamelCase__ = FlaxBeitModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ = [*signature.parameters.keys()]
UpperCamelCase__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCamelCase__ = self._prepare_for_class(snake_case_ , snake_case_ )
UpperCamelCase__ = model_class(snake_case_ )
@jax.jit
def model_jitted(snake_case_ , **snake_case_ ):
return model(pixel_values=snake_case_ , **snake_case_ )
with self.subTest('JIT Enabled' ):
UpperCamelCase__ = model_jitted(**snake_case_ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
UpperCamelCase__ = model_jitted(**snake_case_ ).to_tuple()
self.assertEqual(len(snake_case_ ) , len(snake_case_ ) )
for jitted_output, output in zip(snake_case_ , snake_case_ ):
self.assertEqual(jitted_output.shape , output.shape )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
for model_class_name in self.all_model_classes:
UpperCamelCase__ = model_class_name.from_pretrained('microsoft/beit-base-patch16-224' )
UpperCamelCase__ = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(snake_case_ )
def prepare_img( ):
    """simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_vision
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = FlaxBeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=snake_case_ , return_tensors='np' ).pixel_values
# prepare bool_masked_pos
UpperCamelCase__ = np.ones((1, 196) , dtype=snake_case_ )
# forward pass
UpperCamelCase__ = model(pixel_values=snake_case_ , bool_masked_pos=snake_case_ )
UpperCamelCase__ = outputs.logits
# verify the logits
UpperCamelCase__ = (1, 196, 8192)
self.assertEqual(logits.shape , snake_case_ )
UpperCamelCase__ = np.array(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , snake_case_ , atol=1E-2 ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=snake_case_ , return_tensors='np' )
# forward pass
UpperCamelCase__ = model(**snake_case_ )
UpperCamelCase__ = outputs.logits
# verify the logits
UpperCamelCase__ = (1, 1000)
self.assertEqual(logits.shape , snake_case_ )
UpperCamelCase__ = np.array([-1.2_385, -1.0_987, -1.0_108] )
self.assertTrue(np.allclose(logits[0, :3] , snake_case_ , atol=1E-4 ) )
UpperCamelCase__ = 281
self.assertEqual(logits.argmax(-1 ).item() , snake_case_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=snake_case_ , return_tensors='np' )
# forward pass
UpperCamelCase__ = model(**snake_case_ )
UpperCamelCase__ = outputs.logits
# verify the logits
UpperCamelCase__ = (1, 2_1841)
self.assertEqual(logits.shape , snake_case_ )
UpperCamelCase__ = np.array([1.6_881, -0.2_787, 0.5_901] )
self.assertTrue(np.allclose(logits[0, :3] , snake_case_ , atol=1E-4 ) )
UpperCamelCase__ = 2396
self.assertEqual(logits.argmax(-1 ).item() , snake_case_ )
| 20 | 1 |
"""simple docstring"""
from __future__ import annotations
def max_sum_in_array( array , k ) -> int:
    """simple docstring"""
    if len(array ) < k or k < 0:
        raise ValueError('Invalid Input' )
    max_sum = current_sum = sum(array[:k] )
    for i in range(len(array ) - k ):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum , current_sum )
    return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array = [randint(-10_00, 10_00) for i in range(1_00)]
    k = randint(0, 1_10)
print(F"""The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}""")
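    # A small deterministic sanity check (illustrative values, an addition to
    # the original snippet): the k=4 window sums of the list below are
    # 17, 39, 38, 37, 27, 24, so the maximum is 39.
    assert max_sum_in_array([1, 4, 2, 10, 23, 3, 1, 0, 20], 4) == 39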
| 20 |
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
    def __init__( self ):
        self.node_position = []
    def get_position( self , vertex ):
        return self.node_position[vertex]
    def set_position( self , vertex , pos ):
        self.node_position[vertex] = pos
    def top_to_bottom( self , heap , start , size , positions ):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp , tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child] , positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start] , positions[start] = temp, tempa
                temp = self.get_position(positions[smallest_child] )
                self.set_position(
                    positions[smallest_child] , self.get_position(positions[start] ) )
                self.set_position(positions[start] , temp )
                self.top_to_bottom(heap , smallest_child , size , positions )
    def bottom_to_top( self , val , index , heap , position ):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent] , index )
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp , index )
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp , 0 )
    def heapify( self , heap , positions ):
        start = len(heap ) // 2 - 1
        for i in range(start , -1 , -1 ):
            self.top_to_bottom(heap , i , len(heap ) , positions )
    def delete_minimum( self , heap , positions ):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap , 0 , len(heap ) , positions )
        return temp
def prisms_algorithm( adjacency_list ):
    """simple docstring"""
    heap = Heap()
    visited = [0] * len(adjacency_list )
    nbr_tv = [-1] * len(adjacency_list )  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list ) ):
        distance_tv.append(sys.maxsize )
        positions.append(vertex )
        heap.node_position.append(vertex )
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv , positions )
    for _ in range(1 , len(adjacency_list ) ):
        vertex = heap.delete_minimum(distance_tv , positions )
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex) )
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor )]
                ):
                    distance_tv[heap.get_position(neighbor )] = distance
                    heap.bottom_to_top(
                        distance , heap.get_position(neighbor ) , distance_tv , positions )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("""Enter number of edges: """).strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
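    # A non-interactive sanity check (illustrative graph, an addition to the
    # original snippet): a weighted triangle 0-1 (1), 1-2 (2), 0-2 (3) whose
    # minimum spanning tree is the edge list [(0, 1), (1, 2)].
    example_graph = defaultdict(list)
    for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 3)]:
        example_graph[u].append([v, w])
        example_graph[v].append([u, w])
    assert prisms_algorithm(example_graph) == [(0, 1), (1, 2)]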
| 20 | 1 |
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def solution( t_limit = 1_00_00_00 , n_limit = 10 ) -> int:
    """simple docstring"""
    count = defaultdict(int )
    for outer_width in range(3 , (t_limit // 4) + 2 ):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound , outer_width - 1 , 2 ):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 20 |
"""simple docstring"""
from copy import deepcopy
class FenwickTree:
    def __init__( self , arr = None , size = None ) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr )
        else:
            raise ValueError('Either arr or size must be specified' )
    def init( self , arr ) -> None:
        self.size = len(arr )
        self.tree = deepcopy(arr )
        for i in range(1 , self.size ):
            j = self.next_(i )
            if j < self.size:
                self.tree[j] += self.tree[i]
    def get_array( self ) -> list[int]:
        arr = self.tree[:]
        for i in range(self.size - 1 , 0 , -1 ):
            j = self.next_(i )
            if j < self.size:
                arr[j] -= arr[i]
        return arr
    @staticmethod
    def next_( index ) -> int:
        return index + (index & (-index))
    @staticmethod
    def prev( index ) -> int:
        return index - (index & (-index))
    def add( self , index , value ) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index )
    def update( self , index , value ) -> None:
        self.add(index , value - self.get(index ) )
    def prefix( self , right ) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right )
        return result
    def query( self , left , right ) -> int:
        return self.prefix(right ) - self.prefix(left )
    def get( self , index ) -> int:
        return self.query(index , index + 1 )
    def rank_query( self , value ) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
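    # A short usage sketch (illustrative values, an addition to the original
    # snippet): build the tree over [1, 2, 3, 4, 5], query prefix and range
    # sums, then point-update one element.
    f = FenwickTree([1, 2, 3, 4, 5])
    assert f.prefix(3) == 1 + 2 + 3      # sum of arr[0:3]
    assert f.query(1, 4) == 2 + 3 + 4    # sum of arr[1:4]
    f.add(2, 10)                         # arr[2] += 10
    assert f.get(2) == 13
    assert f.get_array() == [1, 2, 13, 4, 5]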
| 20 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A__ : int= logging.get_logger(__name__)
A__ : Dict= torch.device("""cpu""")
def prepare_img( ):
    """simple docstring"""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
def get_expected_output( swiftformer_name ):
"""simple docstring"""
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.17_03E00, 2.11_07E00, -2.08_11E00, 8.86_85E-01, 2.43_60E-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.96_36E-01, 2.34_78E-01, -1.69_63E00, -1.73_81E00, -8.63_37E-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.27_68E-01, -4.74_29E-01, -1.08_97E00, -1.02_48E00, 3.55_23E-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.53_30E-01, 2.42_11E-01, -6.01_85E-01, -8.27_89E-01, -6.04_46E-02] )
def rename_key( dct , old , new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def create_rename_keys( state_dict ):
    """simple docstring"""
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace('.pwconv' , '.point_wise_conv' )
        if ".dwconv" in k:
            k_new = k_new.replace('.dwconv' , '.depth_wise_conv' )
        if ".Proj." in k:
            k_new = k_new.replace('.Proj.' , '.proj.' )
        if "patch_embed" in k_new:
            k_new = k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding' )
        if "network" in k_new:
            ls = k_new.split('.' )
            if ls[2].isdigit():
                k_new = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:] )
            else:
                k_new = k_new.replace('network' , 'swiftformer.encoder.network' )
        rename_keys.append((k, k_new) )
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint( swiftformer_name , pytorch_dump_folder_path , original_ckpt ):
    """simple docstring"""
    config = SwiftFormerConfig()
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 10_00
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 1_12, 2_20]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 1_68, 2_24]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 1_92, 3_84]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 1_28, 3_20, 5_12]
    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith('https' ):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt , map_location='cpu' , check_hash=True )
        else:
            checkpoint = torch.load(original_ckpt , map_location='cpu' )
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict , rename_key_src , rename_key_dest )
    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config ).eval()
    hf_model.load_state_dict(state_dict )
    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained('preprocessor_config' )
    inputs = processor(images=image , return_tensors='pt' )
    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name )
    hf_logits = hf_model(inputs['pixel_values'] ).logits
    assert hf_logits.shape == torch.Size([1, 10_00] )
    assert torch.allclose(hf_logits[0, 0:5] , timm_logits , atol=1E-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F'Saving model {swiftformer_name} to {pytorch_dump_folder_path}' )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swiftformer_name""",
default="""swiftformer_xs""",
choices=["""swiftformer_xs""", """swiftformer_s""", """swiftformer_l1""", """swiftformer_l3"""],
type=str,
help="""Name of the SwiftFormer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""./converted_outputs/""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--original_ckpt""", default=None, type=str, help="""Path to the original model checkpoint.""")
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
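# Example invocation (the script filename and checkpoint path below are
# illustrative assumptions, not taken from this snippet):
#   python convert_swiftformer_original_to_hf.py \
#       --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ \
#       --original_ckpt ./swiftformer_xs.pth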
| 20 |
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""" )
@require_torch
@require_tf
@slow
class __lowerCamelCase ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = True , ) -> Tuple:
UpperCamelCase__ = [file for file in os.listdir(snake_case_ ) if os.path.isfile(os.path.join(snake_case_ , snake_case_ ) )]
if identifier is not None:
UpperCamelCase__ = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(snake_case_ , snake_case_ ):
for n_ in n_identifier:
UpperCamelCase__ = [file for file in files if n_ not in file]
else:
UpperCamelCase__ = [file for file in files if n_identifier not in file]
UpperCamelCase__ = ignore_files or []
ignore_files.append('__init__.py' )
UpperCamelCase__ = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('Testing' , snake_case_ )
if only_modules:
UpperCamelCase__ = file.split('.' )[0]
try:
UpperCamelCase__ = getattr(snake_case_ , snake_case_ )
UpperCamelCase__ = doctest.DocTestSuite(snake_case_ )
UpperCamelCase__ = unittest.TextTestRunner().run(snake_case_ )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(F'{module_identifier} is not a module.' )
else:
UpperCamelCase__ = doctest.testfile(str('..' / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = Path('src/transformers' )
UpperCamelCase__ = 'modeling'
UpperCamelCase__ = [
'modeling_ctrl.py',
'modeling_tf_ctrl.py',
]
self.analyze_directory(snake_case_ , identifier=snake_case_ , ignore_files=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = Path('src/transformers' )
UpperCamelCase__ = 'tokenization'
self.analyze_directory(snake_case_ , identifier=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
UpperCamelCase__ = Path('src/transformers' )
UpperCamelCase__ = 'configuration'
self.analyze_directory(snake_case_ , identifier=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = Path('src/transformers' )
UpperCamelCase__ = ['configuration', 'modeling', 'tokenization']
self.analyze_directory(snake_case_ , n_identifier=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = Path('docs/source' )
UpperCamelCase__ = ['favicon.ico']
self.analyze_directory(snake_case_ , ignore_files=snake_case_ , only_modules=snake_case_ )
| 20 | 1 |
"""simple docstring"""
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_safs = importlib.util.find_spec("""s3fs""") is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri( dataset_path ) -> str:
    """simple docstring"""
    if "://" in dataset_path:
        dataset_path = dataset_path.split('://' )[1]
    return dataset_path
def is_remote_filesystem( fs ) -> bool:
    """simple docstring"""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False
def rename( fs , src , dst ) -> str:
    """simple docstring"""
    is_local = not is_remote_filesystem(fs )
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src ) , fs._strip_protocol(dst ) )
    else:
        fs.mv(src , dst , recursive=True )
def _reset_fsspec_lock( ) -> None:
    """simple docstring"""
    if hasattr(fsspec.asyn , 'reset_lock' ):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
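# A minimal usage sketch (illustrative paths, an addition to the original
# module): strip the protocol from a remote URI and classify filesystems.
if __name__ == "__main__":
    assert extract_path_from_uri('s3://my-bucket/data' ) == 'my-bucket/data'
    assert extract_path_from_uri('/local/data' ) == '/local/data'
    local_fs = fsspec.filesystem('file' )
    assert not is_remote_filesystem(local_fs )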
| 20 |
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ : str= logging.get_logger(__name__)
A__ : List[Any]= {
"""nvidia/segformer-b0-finetuned-ade-512-512""": (
"""https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"""
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class __lowerCamelCase ( _a ):
a : Any ="""segformer"""
def __init__( self , snake_case_=3 , snake_case_=4 , snake_case_=[2, 2, 2, 2] , snake_case_=[8, 4, 2, 1] , snake_case_=[32, 64, 160, 256] , snake_case_=[7, 3, 3, 3] , snake_case_=[4, 2, 2, 2] , snake_case_=[1, 2, 5, 8] , snake_case_=[4, 4, 4, 4] , snake_case_="gelu" , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.1 , snake_case_=0.02 , snake_case_=0.1 , snake_case_=1E-6 , snake_case_=256 , snake_case_=255 , **snake_case_ , ) -> Tuple:
super().__init__(**snake_case_ )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
' removed, as the behaviour will default to that of reshape_last_stage = True.' , snake_case_ , )
UpperCamelCase__ = num_channels
UpperCamelCase__ = num_encoder_blocks
UpperCamelCase__ = depths
UpperCamelCase__ = sr_ratios
UpperCamelCase__ = hidden_sizes
UpperCamelCase__ = patch_sizes
UpperCamelCase__ = strides
UpperCamelCase__ = mlp_ratios
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = classifier_dropout_prob
UpperCamelCase__ = initializer_range
UpperCamelCase__ = drop_path_rate
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = decoder_hidden_size
UpperCamelCase__ = kwargs.get('reshape_last_stage' , snake_case_ )
UpperCamelCase__ = semantic_loss_ignore_index
class __lowerCamelCase ( _a ):
a : Any =version.parse("""1.11""" )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> float:
return 1E-4
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
return 12
| 20 | 1 |
"""simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class __lowerCamelCase ( _a ):
def __init__( self , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_ = True , snake_case_ = None , snake_case_ = False , snake_case_ = None , snake_case_ = True , snake_case_ = "arrow" , **snake_case_ , ) -> str:
super().__init__(
split=snake_case_ , features=snake_case_ , cache_dir=snake_case_ , keep_in_memory=snake_case_ , streaming=snake_case_ , **snake_case_ , )
UpperCamelCase__ = load_from_cache_file
UpperCamelCase__ = file_format
UpperCamelCase__ = Spark(
df=snake_case_ , features=snake_case_ , cache_dir=snake_case_ , working_dir=snake_case_ , **snake_case_ , )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
UpperCamelCase__ = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=snake_case_ , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
| 20 |
"""simple docstring"""
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory( args ):
    """simple docstring"""
    return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class __lowerCamelCase ( _a ):
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Union[str, Any]:
UpperCamelCase__ = parser.add_parser('download' )
download_parser.add_argument(
'--cache-dir' , type=snake_case_ , default=snake_case_ , help='Path to location to store the models' )
download_parser.add_argument(
'--force' , action='store_true' , help='Force the model to be download even if already in cache-dir' )
download_parser.add_argument(
'--trust-remote-code' , action='store_true' , help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' , )
download_parser.add_argument('model' , type=snake_case_ , help='Name of the model to download' )
        download_parser.set_defaults(func=download_command_factory )
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> str:
UpperCamelCase__ = model
UpperCamelCase__ = cache
UpperCamelCase__ = force
UpperCamelCase__ = trust_remote_code
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
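# Example invocation from the shell, matching the parser defined above (the
# CLI entry-point name is an assumption based on the transformers toolchain):
#   transformers-cli download bert-base-uncased --cache-dir ./models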
| 20 | 1 |
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""" )
@require_torch
@require_tf
@slow
class __lowerCamelCase ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = True , ) -> Tuple:
UpperCamelCase__ = [file for file in os.listdir(snake_case_ ) if os.path.isfile(os.path.join(snake_case_ , snake_case_ ) )]
if identifier is not None:
UpperCamelCase__ = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(snake_case_ , snake_case_ ):
for n_ in n_identifier:
UpperCamelCase__ = [file for file in files if n_ not in file]
else:
UpperCamelCase__ = [file for file in files if n_identifier not in file]
UpperCamelCase__ = ignore_files or []
ignore_files.append('__init__.py' )
UpperCamelCase__ = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('Testing' , snake_case_ )
if only_modules:
UpperCamelCase__ = file.split('.' )[0]
try:
UpperCamelCase__ = getattr(snake_case_ , snake_case_ )
UpperCamelCase__ = doctest.DocTestSuite(snake_case_ )
UpperCamelCase__ = unittest.TextTestRunner().run(snake_case_ )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(F'{module_identifier} is not a module.' )
else:
UpperCamelCase__ = doctest.testfile(str('..' / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = Path('src/transformers' )
UpperCamelCase__ = 'modeling'
UpperCamelCase__ = [
'modeling_ctrl.py',
'modeling_tf_ctrl.py',
]
self.analyze_directory(snake_case_ , identifier=snake_case_ , ignore_files=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = Path('src/transformers' )
UpperCamelCase__ = 'tokenization'
self.analyze_directory(snake_case_ , identifier=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
UpperCamelCase__ = Path('src/transformers' )
UpperCamelCase__ = 'configuration'
self.analyze_directory(snake_case_ , identifier=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = Path('src/transformers' )
UpperCamelCase__ = ['configuration', 'modeling', 'tokenization']
self.analyze_directory(snake_case_ , n_identifier=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = Path('docs/source' )
UpperCamelCase__ = ['favicon.ico']
self.analyze_directory(snake_case_ , ignore_files=snake_case_ , only_modules=snake_case_ )
| 20 |
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaVaModelTester:
def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=5 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=False , snake_case_=True , snake_case_="None" , snake_case_=3 , snake_case_=4 , snake_case_=None , ) -> str:
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = seq_length
UpperCamelCase__ = is_training
UpperCamelCase__ = use_input_mask
UpperCamelCase__ = use_token_type_ids
UpperCamelCase__ = use_labels
UpperCamelCase__ = vocab_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = type_vocab_size
UpperCamelCase__ = type_sequence_label_size
UpperCamelCase__ = initializer_range
UpperCamelCase__ = num_labels
UpperCamelCase__ = num_choices
UpperCamelCase__ = relative_attention
UpperCamelCase__ = position_biased_input
UpperCamelCase__ = pos_att_type
UpperCamelCase__ = scope
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ = None
if self.use_input_mask:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
UpperCamelCase__ = None
if self.use_token_type_ids:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Any:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> str:
UpperCamelCase__ = DebertaVaModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ )[0]
UpperCamelCase__ = model(snake_case_ , token_type_ids=snake_case_ )[0]
UpperCamelCase__ = model(snake_case_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Optional[Any]:
UpperCamelCase__ = DebertaVaForMaskedLM(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Dict:
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = DebertaVaForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Optional[int]:
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = DebertaVaForTokenClassification(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Tuple:
UpperCamelCase__ = DebertaVaForQuestionAnswering(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int:
UpperCamelCase__ = DebertaVaForMultipleChoice(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class __lowerCamelCase ( _a , _a , unittest.TestCase ):
a : Any =(
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
a : Dict =(
{
"""feature-extraction""": DebertaVaModel,
"""fill-mask""": DebertaVaForMaskedLM,
"""question-answering""": DebertaVaForQuestionAnswering,
"""text-classification""": DebertaVaForSequenceClassification,
"""token-classification""": DebertaVaForTokenClassification,
"""zero-shot""": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
a : Tuple =True
a : Union[str, Any] =False
a : Tuple =False
a : Union[str, Any] =False
a : Dict =False
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
UpperCamelCase__ = DebertaVaModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=snake_case_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*snake_case_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ = DebertaVaModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( unittest.TestCase ):
@unittest.skip(reason='Model not available yet' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
pass
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
UpperCamelCase__ = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge' )
UpperCamelCase__ = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
UpperCamelCase__ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ )[0]
# compare the actual values for a slice.
UpperCamelCase__ = torch.tensor(
[[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , snake_case_ , atol=1E-4 ) , F'{output[:, 1:4, 1:4]}' )
| 20 | 1 |
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC ):
def __init__( self ) -> Tuple:
# test for the above condition
self.test()
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = 0
UpperCamelCase__ = False
while not completed:
if counter == 1:
self.reset()
UpperCamelCase__ = self.advance()
if not self.does_advance(snake_case_ ):
raise Exception(
'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.' )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.update(snake_case_ )
counter += 1
if counter > 1_0000:
raise Exception('update() does not fulfill the constraint.' )
if self.remaining() != 0:
raise Exception('Custom Constraint is not defined correctly.' )
@abstractmethod
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
raise NotImplementedError(
F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Optional[int]:
raise NotImplementedError(
F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Optional[int]:
raise NotImplementedError(
F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
raise NotImplementedError(
F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
raise NotImplementedError(
F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def SCREAMING_SNAKE_CASE__ ( self , snake_case_=False ) -> Union[str, Any]:
raise NotImplementedError(
F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
class PhrasalConstraint(Constraint ):
def __init__( self , snake_case_ ) -> List[Any]:
        super(PhrasalConstraint , self ).__init__()
if not isinstance(snake_case_ , snake_case_ ) or len(snake_case_ ) == 0:
raise ValueError(F'`token_ids` has to be a non-empty list, but is {token_ids}.' )
if any((not isinstance(snake_case_ , snake_case_ ) or token_id < 0) for token_id in token_ids ):
raise ValueError(F'Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.' )
UpperCamelCase__ = token_ids
UpperCamelCase__ = len(self.token_ids )
UpperCamelCase__ = -1 # the index of the currently fulfilled step
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> str:
if not isinstance(snake_case_ , snake_case_ ):
raise ValueError(F'`token_id` has to be an `int`, but is {token_id} of type {type(snake_case_ )}' )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Optional[Any]:
if not isinstance(snake_case_ , snake_case_ ):
raise ValueError(F'`token_id` has to be an `int`, but is {token_id} of type {type(snake_case_ )}' )
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
if self.does_advance(snake_case_ ):
self.fulfilled_idx += 1
UpperCamelCase__ = True
if self.fulfilled_idx == (self.seqlen - 1):
UpperCamelCase__ = True
UpperCamelCase__ = completed
else:
# failed to make progress.
UpperCamelCase__ = True
self.reset()
return stepped, completed, reset
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ = False
UpperCamelCase__ = 0
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
return self.seqlen - (self.fulfilled_idx + 1)
def SCREAMING_SNAKE_CASE__ ( self , snake_case_=False ) -> int:
UpperCamelCase__ = PhrasalConstraint(self.token_ids )
if stateful:
UpperCamelCase__ = self.seqlen
UpperCamelCase__ = self.fulfilled_idx
UpperCamelCase__ = self.completed
return new_constraint
class DisjunctiveTrie:
def __init__( self , snake_case_ , snake_case_=True ) -> List[Any]:
UpperCamelCase__ = max([len(snake_case_ ) for one in nested_token_ids] )
UpperCamelCase__ = {}
for token_ids in nested_token_ids:
UpperCamelCase__ = root
for tidx, token_id in enumerate(snake_case_ ):
if token_id not in level:
UpperCamelCase__ = {}
UpperCamelCase__ = level[token_id]
if no_subsets and self.has_subsets(snake_case_ , snake_case_ ):
raise ValueError(
'Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'
F' {nested_token_ids}.' )
UpperCamelCase__ = root
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Union[str, Any]:
UpperCamelCase__ = self.trie
for current_token in current_seq:
UpperCamelCase__ = start[current_token]
UpperCamelCase__ = list(start.keys() )
return next_tokens
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Optional[int]:
UpperCamelCase__ = self.next_tokens(snake_case_ )
return len(snake_case_ ) == 0
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Optional[int]:
UpperCamelCase__ = list(root.values() )
if len(snake_case_ ) == 0:
return 1
else:
return sum([self.count_leaves(snake_case_ ) for nn in next_nodes] )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> Union[str, Any]:
UpperCamelCase__ = self.count_leaves(snake_case_ )
return len(snake_case_ ) != leaf_count
class DisjunctiveConstraint(Constraint ):
def __init__( self , snake_case_ ) -> str:
        super(DisjunctiveConstraint , self ).__init__()
if not isinstance(snake_case_ , snake_case_ ) or len(snake_case_ ) == 0:
raise ValueError(F'`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.' )
if any(not isinstance(snake_case_ , snake_case_ ) for token_ids in nested_token_ids ):
raise ValueError(F'`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.' )
if any(
any((not isinstance(snake_case_ , snake_case_ ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
F'Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.' )
UpperCamelCase__ = DisjunctiveTrie(snake_case_ )
UpperCamelCase__ = nested_token_ids
UpperCamelCase__ = self.trie.max_height
UpperCamelCase__ = []
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = self.trie.next_tokens(self.current_seq )
if len(snake_case_ ) == 0:
return None
else:
return token_list
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> List[Any]:
if not isinstance(snake_case_ , snake_case_ ):
raise ValueError(F'`token_id` is supposed to be type `int`, but is {token_id} of type {type(snake_case_ )}' )
UpperCamelCase__ = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> List[Any]:
if not isinstance(snake_case_ , snake_case_ ):
raise ValueError(F'`token_id` is supposed to be type `int`, but is {token_id} of type {type(snake_case_ )}' )
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
if self.does_advance(snake_case_ ):
self.current_seq.append(snake_case_ )
UpperCamelCase__ = True
else:
UpperCamelCase__ = True
self.reset()
UpperCamelCase__ = self.trie.reached_leaf(self.current_seq )
UpperCamelCase__ = completed
return stepped, completed, reset
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ = False
UpperCamelCase__ = []
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_=False ) -> Optional[int]:
UpperCamelCase__ = DisjunctiveConstraint(self.token_ids )
if stateful:
UpperCamelCase__ = self.seqlen
UpperCamelCase__ = self.current_seq
UpperCamelCase__ = self.completed
return new_constraint
class ConstraintListState:
def __init__( self , snake_case_ ) -> Any:
UpperCamelCase__ = constraints
# max # of steps required to fulfill a given constraint
UpperCamelCase__ = max([c.seqlen for c in constraints] )
UpperCamelCase__ = len(snake_case_ )
UpperCamelCase__ = False
self.init_state()
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = []
UpperCamelCase__ = None
UpperCamelCase__ = [constraint.copy(stateful=snake_case_ ) for constraint in self.constraints]
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
UpperCamelCase__ = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
UpperCamelCase__ = constraint.advance()
if isinstance(snake_case_ , snake_case_ ):
token_list.append(snake_case_ )
elif isinstance(snake_case_ , snake_case_ ):
token_list.extend(snake_case_ )
else:
UpperCamelCase__ = self.inprogress_constraint.advance()
if isinstance(snake_case_ , snake_case_ ):
token_list.append(snake_case_ )
elif isinstance(snake_case_ , snake_case_ ):
token_list.extend(snake_case_ )
if len(snake_case_ ) == 0:
return None
else:
return token_list
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Optional[Any]:
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
UpperCamelCase__ , UpperCamelCase__ = self.add(snake_case_ )
# the entire list of constraints are fulfilled
if self.completed:
break
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Union[str, Any]:
if not isinstance(snake_case_ , snake_case_ ):
raise ValueError(F'`token_id` should be an `int`, but is `{token_id}`.' )
UpperCamelCase__ , UpperCamelCase__ = False, False
if self.completed:
UpperCamelCase__ = True
UpperCamelCase__ = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
# current job, simply update the state.
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.inprogress_constraint.update(snake_case_ )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=snake_case_ ) )
UpperCamelCase__ = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
UpperCamelCase__ = None
if len(self.pending_constraints ) == 0:
# we're done!
UpperCamelCase__ = True
else:
# Not in the middle of fulfilling a constraint. So, does this `token_id` help us step towards any
# constraint in our list of pending constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(snake_case_ ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = pending_constraint.update(snake_case_ )
if not stepped:
raise Exception(
'`constraint.update(token_id)` is not yielding incremental progress, '
'even though `constraint.does_advance(token_id)` is true.' )
if complete:
self.complete_constraints.append(snake_case_ )
UpperCamelCase__ = None
if not complete and stepped:
UpperCamelCase__ = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
UpperCamelCase__ = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
UpperCamelCase__ = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def SCREAMING_SNAKE_CASE__ ( self , snake_case_=True ) -> Union[str, Any]:
UpperCamelCase__ = ConstraintListState(self.constraints ) # we actually never touch the self.constraints objects
# throughout this process, so they are still in their initialization state.
if stateful:
UpperCamelCase__ = [
constraint.copy(stateful=snake_case_ ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
UpperCamelCase__ = self.inprogress_constraint.copy(stateful=snake_case_ )
UpperCamelCase__ = [constraint.copy() for constraint in self.pending_constraints]
return new_state
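# Illustrative usage sketch: the class above mirrors transformers' ConstraintListState,
# whose methods are named add/advance/reset/copy upstream (obfuscated in this dump).
# The token ids are made up for illustration:
#
#   constraint = DisjunctiveConstraint([[5, 6, 7], [5, 8]])  # either branch fulfills it
#   state = ConstraintListState([constraint])
#   for token_id in (5, 6, 7):
#       complete, stepped = state.add(token_id)  # steps or completes one constraint
#   assert complete  # a trie leaf was reached, so the constraint list is fulfilled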
| 20 |
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ = SwinConfig()
UpperCamelCase__ = swin_name.split('_' )
UpperCamelCase__ = name_split[1]
UpperCamelCase__ = int(name_split[4] )
UpperCamelCase__ = int(name_split[3][-1] )
if model_size == "tiny":
UpperCamelCase__ = 96
UpperCamelCase__ = (2, 2, 6, 2)
UpperCamelCase__ = (3, 6, 12, 24)
elif model_size == "small":
UpperCamelCase__ = 96
UpperCamelCase__ = (2, 2, 18, 2)
UpperCamelCase__ = (3, 6, 12, 24)
elif model_size == "base":
UpperCamelCase__ = 1_28
UpperCamelCase__ = (2, 2, 18, 2)
UpperCamelCase__ = (4, 8, 16, 32)
else:
UpperCamelCase__ = 1_92
UpperCamelCase__ = (2, 2, 18, 2)
UpperCamelCase__ = (6, 12, 24, 48)
if "in22k" in swin_name:
UpperCamelCase__ = 2_18_41
else:
UpperCamelCase__ = 10_00
UpperCamelCase__ = 'huggingface/label-files'
UpperCamelCase__ = 'imagenet-1k-id2label.json'
UpperCamelCase__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
UpperCamelCase__ = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
UpperCamelCase__ = idalabel
UpperCamelCase__ = {v: k for k, v in idalabel.items()}
UpperCamelCase__ = img_size
UpperCamelCase__ = num_classes
UpperCamelCase__ = embed_dim
UpperCamelCase__ = depths
UpperCamelCase__ = num_heads
UpperCamelCase__ = window_size
return config
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
if "patch_embed.proj" in name:
UpperCamelCase__ = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
UpperCamelCase__ = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
UpperCamelCase__ = 'encoder.' + name
if "attn.proj" in name:
UpperCamelCase__ = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
UpperCamelCase__ = name.replace('attn' , 'attention.self' )
if "norm1" in name:
UpperCamelCase__ = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
UpperCamelCase__ = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
UpperCamelCase__ = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
UpperCamelCase__ = name.replace('mlp.fc2' , 'output.dense' )
if name == "norm.weight":
UpperCamelCase__ = 'layernorm.weight'
if name == "norm.bias":
UpperCamelCase__ = 'layernorm.bias'
if "head" in name:
UpperCamelCase__ = name.replace('head' , 'classifier' )
else:
UpperCamelCase__ = 'swin.' + name
return name
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
UpperCamelCase__ = orig_state_dict.pop(SCREAMING_SNAKE_CASE )
if "mask" in key:
continue
elif "qkv" in key:
UpperCamelCase__ = key.split('.' )
UpperCamelCase__ = int(key_split[1] )
UpperCamelCase__ = int(key_split[3] )
UpperCamelCase__ = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
UpperCamelCase__ = val[:dim, :]
UpperCamelCase__ = val[
dim : dim * 2, :
]
UpperCamelCase__ = val[-dim:, :]
else:
UpperCamelCase__ = val[
:dim
]
UpperCamelCase__ = val[
dim : dim * 2
]
UpperCamelCase__ = val[
-dim:
]
else:
UpperCamelCase__ = val
return orig_state_dict
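# Note: timm stores query/key/value as one fused "qkv" matrix per block; the slices
# above carve it into thirds -- val[:dim, :], val[dim : dim * 2, :], val[-dim:, :] --
# to fill the separate query/key/value projections expected by the Hugging Face
# Swin checkpoint layout.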
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
UpperCamelCase__ = timm.create_model(SCREAMING_SNAKE_CASE , pretrained=SCREAMING_SNAKE_CASE )
timm_model.eval()
UpperCamelCase__ = get_swin_config(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = SwinForImageClassification(SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase__ = convert_state_dict(timm_model.state_dict() , SCREAMING_SNAKE_CASE )
model.load_state_dict(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
UpperCamelCase__ = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-' ) ) )
UpperCamelCase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
UpperCamelCase__ = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='pt' )
UpperCamelCase__ = timm_model(inputs['pixel_values'] )
UpperCamelCase__ = model(**SCREAMING_SNAKE_CASE ).logits
assert torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-3 )
print(F'Saving model {swin_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
A__ : Optional[Any]= argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
A__ : Tuple= parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
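# Example invocation (the script filename is hypothetical):
#   python convert_swin_timm_to_pytorch.py \
#       --swin_name swin_tiny_patch4_window7_224 \
#       --pytorch_dump_folder_path ./swin-tiny-patch4-window7-224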
| 20 | 1 |
"""simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class __lowerCamelCase ( _a ):
a : Optional[Any] =(PNDMScheduler,)
a : Optional[int] =(("""num_inference_steps""", 5_0),)
def SCREAMING_SNAKE_CASE__ ( self , **snake_case_ ) -> Optional[int]:
UpperCamelCase__ = {
'num_train_timesteps': 1000,
'beta_start': 0.0_001,
'beta_end': 0.02,
'beta_schedule': 'linear',
}
config.update(**snake_case_ )
return config
def SCREAMING_SNAKE_CASE__ ( self , snake_case_=0 , **snake_case_ ) -> int:
UpperCamelCase__ = dict(self.forward_default_kwargs )
UpperCamelCase__ = kwargs.pop('num_inference_steps' , snake_case_ )
UpperCamelCase__ = self.dummy_sample
UpperCamelCase__ = 0.1 * sample
UpperCamelCase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCamelCase__ = self.get_scheduler_config(**snake_case_ )
UpperCamelCase__ = scheduler_class(**snake_case_ )
scheduler.set_timesteps(snake_case_ )
# copy over dummy past residuals
UpperCamelCase__ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(snake_case_ )
UpperCamelCase__ = scheduler_class.from_pretrained(snake_case_ )
new_scheduler.set_timesteps(snake_case_ )
# copy over dummy past residuals
UpperCamelCase__ = dummy_past_residuals[:]
UpperCamelCase__ = scheduler.step_prk(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample
UpperCamelCase__ = new_scheduler.step_prk(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
UpperCamelCase__ = scheduler.step_plms(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample
UpperCamelCase__ = new_scheduler.step_plms(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
pass
def SCREAMING_SNAKE_CASE__ ( self , snake_case_=0 , **snake_case_ ) -> List[str]:
UpperCamelCase__ = dict(self.forward_default_kwargs )
UpperCamelCase__ = kwargs.pop('num_inference_steps' , snake_case_ )
UpperCamelCase__ = self.dummy_sample
UpperCamelCase__ = 0.1 * sample
UpperCamelCase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = scheduler_class(**snake_case_ )
scheduler.set_timesteps(snake_case_ )
# copy over dummy past residuals (must be after setting timesteps)
UpperCamelCase__ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(snake_case_ )
UpperCamelCase__ = scheduler_class.from_pretrained(snake_case_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(snake_case_ )
# copy over dummy past residual (must be after setting timesteps)
UpperCamelCase__ = dummy_past_residuals[:]
UpperCamelCase__ = scheduler.step_prk(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample
UpperCamelCase__ = new_scheduler.step_prk(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
UpperCamelCase__ = scheduler.step_plms(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample
UpperCamelCase__ = new_scheduler.step_plms(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE__ ( self , **snake_case_ ) -> Union[str, Any]:
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config(**snake_case_ )
UpperCamelCase__ = scheduler_class(**snake_case_ )
UpperCamelCase__ = 10
UpperCamelCase__ = self.dummy_model()
UpperCamelCase__ = self.dummy_sample_deter
scheduler.set_timesteps(snake_case_ )
for i, t in enumerate(scheduler.prk_timesteps ):
UpperCamelCase__ = model(snake_case_ , snake_case_ )
UpperCamelCase__ = scheduler.step_prk(snake_case_ , snake_case_ , snake_case_ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
UpperCamelCase__ = model(snake_case_ , snake_case_ )
UpperCamelCase__ = scheduler.step_plms(snake_case_ , snake_case_ , snake_case_ ).prev_sample
return sample
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = dict(self.forward_default_kwargs )
UpperCamelCase__ = kwargs.pop('num_inference_steps' , snake_case_ )
for scheduler_class in self.scheduler_classes:
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = scheduler_class(**snake_case_ )
UpperCamelCase__ = self.dummy_sample
UpperCamelCase__ = 0.1 * sample
if num_inference_steps is not None and hasattr(snake_case_ , 'set_timesteps' ):
scheduler.set_timesteps(snake_case_ )
elif num_inference_steps is not None and not hasattr(snake_case_ , 'set_timesteps' ):
UpperCamelCase__ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCamelCase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
UpperCamelCase__ = dummy_past_residuals[:]
UpperCamelCase__ = scheduler.step_prk(snake_case_ , 0 , snake_case_ , **snake_case_ ).prev_sample
UpperCamelCase__ = scheduler.step_prk(snake_case_ , 1 , snake_case_ , **snake_case_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
UpperCamelCase__ = scheduler.step_plms(snake_case_ , 0 , snake_case_ , **snake_case_ ).prev_sample
UpperCamelCase__ = scheduler.step_plms(snake_case_ , 1 , snake_case_ , **snake_case_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=snake_case_ )
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config(steps_offset=1 )
UpperCamelCase__ = scheduler_class(**snake_case_ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
for beta_start, beta_end in zip([0.0_001, 0.001] , [0.002, 0.02] ):
self.check_over_configs(beta_start=snake_case_ , beta_end=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
for t in [1, 5, 10]:
self.check_over_forward(time_step=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
# an earlier version of set_timesteps() raised an error when indexing alphas with a number of inference steps that is a power of 3
UpperCamelCase__ = 27
for scheduler_class in self.scheduler_classes:
UpperCamelCase__ = self.dummy_sample
UpperCamelCase__ = 0.1 * sample
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = scheduler_class(**snake_case_ )
scheduler.set_timesteps(snake_case_ )
# before the power-of-3 fix this would error on the first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
UpperCamelCase__ = scheduler.step_prk(snake_case_ , snake_case_ , snake_case_ ).prev_sample
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
with self.assertRaises(snake_case_ ):
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = scheduler_class(**snake_case_ )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.full_loop()
UpperCamelCase__ = torch.sum(torch.abs(snake_case_ ) )
UpperCamelCase__ = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_sum.item() - 198.1_318 ) < 1E-2
assert abs(result_mean.item() - 0.2_580 ) < 1E-3
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
UpperCamelCase__ = self.full_loop(prediction_type='v_prediction' )
UpperCamelCase__ = torch.sum(torch.abs(snake_case_ ) )
UpperCamelCase__ = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_sum.item() - 67.3_986 ) < 1E-2
assert abs(result_mean.item() - 0.0_878 ) < 1E-3
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
# We specify a different beta schedule so that the first alpha is 0.99
UpperCamelCase__ = self.full_loop(set_alpha_to_one=snake_case_ , beta_start=0.01 )
UpperCamelCase__ = torch.sum(torch.abs(snake_case_ ) )
UpperCamelCase__ = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_sum.item() - 230.0_399 ) < 1E-2
assert abs(result_mean.item() - 0.2_995 ) < 1E-3
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
# We specify a different beta schedule so that the first alpha is 0.99
UpperCamelCase__ = self.full_loop(set_alpha_to_one=snake_case_ , beta_start=0.01 )
UpperCamelCase__ = torch.sum(torch.abs(snake_case_ ) )
UpperCamelCase__ = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_sum.item() - 186.9_482 ) < 1E-2
assert abs(result_mean.item() - 0.2_434 ) < 1E-3
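# Illustrative sketch of the two-phase sampling these tests exercise: PNDM runs
# Runge-Kutta (PRK) warm-up steps before switching to linear multistep (PLMS)
# updates. Against the diffusers API, scheduler.step() dispatches between the two
# phases automatically; `unet` is a user-supplied model here:
#
#   scheduler = PNDMScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   for t in scheduler.timesteps:
#       noise_pred = unet(sample, t)
#       sample = scheduler.step(noise_pred, t, sample).prev_sample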
| 20 |
"""simple docstring"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> bool:
"""simple docstring"""
UpperCamelCase__ = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def lowerCAmelCase_( SCREAMING_SNAKE_CASE = 50_00 ) -> int:
"""simple docstring"""
UpperCamelCase__ = [(i * (3 * i - 1)) // 2 for i in range(1 , SCREAMING_SNAKE_CASE )]
for i, pentagonal_i in enumerate(SCREAMING_SNAKE_CASE ):
for j in range(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) ):
UpperCamelCase__ = pentagonal_nums[j]
UpperCamelCase__ = pentagonal_i + pentagonal_j
UpperCamelCase__ = pentagonal_j - pentagonal_i
if is_pentagonal(SCREAMING_SNAKE_CASE ) and is_pentagonal(SCREAMING_SNAKE_CASE ):
return b
return -1
if __name__ == "__main__":
print(F"""{solution() = }""")
| 20 | 1 |
"""simple docstring"""
from math import factorial
def lowerCAmelCase_( SCREAMING_SNAKE_CASE = 20 ) -> int:
"""simple docstring"""
UpperCamelCase__ = 2 * n # the middle entry of odd rows (starting at row 3) is the solution for n = 1, 2, 3, ...
UpperCamelCase__ = n // 2
return int(factorial(SCREAMING_SNAKE_CASE ) / (factorial(SCREAMING_SNAKE_CASE ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
A__ : List[str]= int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number.""")
| 20 |
"""simple docstring"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE = 50_00_00_00 ) -> int:
"""simple docstring"""
UpperCamelCase__ = set()
UpperCamelCase__ = int((limit - 24) ** (1 / 2) )
UpperCamelCase__ = set(range(3 , prime_square_limit + 1 , 2 ) )
primes.add(2 )
for p in range(3 , prime_square_limit + 1 , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , prime_square_limit + 1 , p ) ) )
for primea in primes:
UpperCamelCase__ = primea * primea
for primea in primes:
UpperCamelCase__ = primea * primea * primea
if square + cube >= limit - 16:
break
for primea in primes:
UpperCamelCase__ = primea * primea * primea * primea
UpperCamelCase__ = square + cube + tetr
if total >= limit:
break
ret.add(SCREAMING_SNAKE_CASE )
return len(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 20 | 1 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
if "cls_token" in name:
UpperCamelCase__ = name.replace('cls_token' , 'vit.embeddings.cls_token' )
if "mask_token" in name:
UpperCamelCase__ = name.replace('mask_token' , 'decoder.mask_token' )
if "decoder_pos_embed" in name:
UpperCamelCase__ = name.replace('decoder_pos_embed' , 'decoder.decoder_pos_embed' )
if "pos_embed" in name and "decoder" not in name:
UpperCamelCase__ = name.replace('pos_embed' , 'vit.embeddings.position_embeddings' )
if "patch_embed.proj" in name:
UpperCamelCase__ = name.replace('patch_embed.proj' , 'vit.embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
UpperCamelCase__ = name.replace('patch_embed.norm' , 'vit.embeddings.norm' )
if "decoder_blocks" in name:
UpperCamelCase__ = name.replace('decoder_blocks' , 'decoder.decoder_layers' )
if "blocks" in name:
UpperCamelCase__ = name.replace('blocks' , 'vit.encoder.layer' )
if "attn.proj" in name:
UpperCamelCase__ = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
UpperCamelCase__ = name.replace('attn' , 'attention.self' )
if "norm1" in name:
UpperCamelCase__ = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
UpperCamelCase__ = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
UpperCamelCase__ = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
UpperCamelCase__ = name.replace('mlp.fc2' , 'output.dense' )
if "decoder_embed" in name:
UpperCamelCase__ = name.replace('decoder_embed' , 'decoder.decoder_embed' )
if "decoder_norm" in name:
UpperCamelCase__ = name.replace('decoder_norm' , 'decoder.decoder_norm' )
if "decoder_pred" in name:
UpperCamelCase__ = name.replace('decoder_pred' , 'decoder.decoder_pred' )
if "norm.weight" in name and "decoder" not in name:
UpperCamelCase__ = name.replace('norm.weight' , 'vit.layernorm.weight' )
if "norm.bias" in name and "decoder" not in name:
UpperCamelCase__ = name.replace('norm.bias' , 'vit.layernorm.bias' )
return name
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
UpperCamelCase__ = orig_state_dict.pop(SCREAMING_SNAKE_CASE )
if "qkv" in key:
UpperCamelCase__ = key.split('.' )
UpperCamelCase__ = int(key_split[1] )
if "decoder_blocks" in key:
UpperCamelCase__ = config.decoder_hidden_size
UpperCamelCase__ = 'decoder.decoder_layers.'
if "weight" in key:
UpperCamelCase__ = val[:dim, :]
UpperCamelCase__ = val[dim : dim * 2, :]
UpperCamelCase__ = val[-dim:, :]
elif "bias" in key:
UpperCamelCase__ = val[:dim]
UpperCamelCase__ = val[dim : dim * 2]
UpperCamelCase__ = val[-dim:]
else:
UpperCamelCase__ = config.hidden_size
UpperCamelCase__ = 'vit.encoder.layer.'
if "weight" in key:
UpperCamelCase__ = val[:dim, :]
UpperCamelCase__ = val[dim : dim * 2, :]
UpperCamelCase__ = val[-dim:, :]
elif "bias" in key:
UpperCamelCase__ = val[:dim]
UpperCamelCase__ = val[dim : dim * 2]
UpperCamelCase__ = val[-dim:]
else:
UpperCamelCase__ = val
return orig_state_dict
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
UpperCamelCase__ = ViTMAEConfig()
if "large" in checkpoint_url:
UpperCamelCase__ = 10_24
UpperCamelCase__ = 40_96
UpperCamelCase__ = 24
UpperCamelCase__ = 16
elif "huge" in checkpoint_url:
UpperCamelCase__ = 14
UpperCamelCase__ = 12_80
UpperCamelCase__ = 51_20
UpperCamelCase__ = 32
UpperCamelCase__ = 16
UpperCamelCase__ = ViTMAEForPreTraining(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE , map_location='cpu' )['model']
UpperCamelCase__ = ViTMAEImageProcessor(size=config.image_size )
UpperCamelCase__ = convert_state_dict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
model.load_state_dict(SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase__ = 'https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg'
UpperCamelCase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
UpperCamelCase__ = ViTMAEImageProcessor(size=config.image_size )
UpperCamelCase__ = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='pt' )
# forward pass
torch.manual_seed(2 )
UpperCamelCase__ = model(**SCREAMING_SNAKE_CASE )
UpperCamelCase__ = outputs.logits
if "large" in checkpoint_url:
UpperCamelCase__ = torch.tensor(
[[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]] )
elif "huge" in checkpoint_url:
UpperCamelCase__ = torch.tensor(
[[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]] )
else:
UpperCamelCase__ = torch.tensor(
[[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
A__ : int= argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
A__ : List[Any]= parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
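# Example invocation (the script filename is hypothetical):
#   python convert_vit_mae_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth \
#       --pytorch_dump_folder_path ./vit-mae-base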
| 20 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
A__ : List[Any]= ["""bert-base-uncased""", """bert-base-cased"""]
A__ : Optional[int]= """hf-internal-testing/tiny-bert-tf-only"""
if is_tf_available():
class __lowerCamelCase ( tf.keras.Model ):
def __init__( self , snake_case_ ) -> Optional[int]:
super().__init__()
UpperCamelCase__ = tokenizer
UpperCamelCase__ = AutoConfig.from_pretrained(snake_case_ )
UpperCamelCase__ = TFAutoModel.from_config(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> int:
UpperCamelCase__ = self.tokenizer(snake_case_ )
UpperCamelCase__ = self.bert(**snake_case_ )
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class __lowerCamelCase ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
super().setUp()
UpperCamelCase__ = [
BertTokenizer.from_pretrained(snake_case_ ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
UpperCamelCase__ = [TFBertTokenizer.from_pretrained(snake_case_ ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(snake_case_ , use_fast_bert_tokenizer=snake_case_ )
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
UpperCamelCase__ = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
UpperCamelCase__ = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in (self.test_sentences, self.paired_sentences):
UpperCamelCase__ = tokenizer(snake_case_ , return_tensors='tf' , padding='longest' )
UpperCamelCase__ = tf_tokenizer(snake_case_ )
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
for tf_tokenizer in self.tf_tokenizers:
UpperCamelCase__ = tf_tokenizer(self.paired_sentences )
UpperCamelCase__ = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
for tf_tokenizer in self.tf_tokenizers:
UpperCamelCase__ = tf.function(snake_case_ )
for test_inputs in (self.test_sentences, self.paired_sentences):
UpperCamelCase__ = tf.constant(snake_case_ )
UpperCamelCase__ = compiled_tokenizer(snake_case_ )
UpperCamelCase__ = tf_tokenizer(snake_case_ )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
for tf_tokenizer in self.tf_tokenizers:
UpperCamelCase__ = ModelToSave(tokenizer=snake_case_ )
UpperCamelCase__ = tf.convert_to_tensor(self.test_sentences )
UpperCamelCase__ = model(snake_case_ ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
UpperCamelCase__ = Path(snake_case_ ) / 'saved.model'
model.save(snake_case_ )
UpperCamelCase__ = tf.keras.models.load_model(snake_case_ )
UpperCamelCase__ = loaded_model(snake_case_ )
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1E-5 )
| 20 | 1 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
A__ : List[Any]= ["""bert-base-uncased""", """bert-base-cased"""]
A__ : Optional[int]= """hf-internal-testing/tiny-bert-tf-only"""
if is_tf_available():
class __lowerCamelCase ( tf.keras.Model ):
def __init__( self , snake_case_ ) -> Optional[int]:
super().__init__()
UpperCamelCase__ = tokenizer
UpperCamelCase__ = AutoConfig.from_pretrained(snake_case_ )
UpperCamelCase__ = TFAutoModel.from_config(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> int:
UpperCamelCase__ = self.tokenizer(snake_case_ )
UpperCamelCase__ = self.bert(**snake_case_ )
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class __lowerCamelCase ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
super().setUp()
UpperCamelCase__ = [
BertTokenizer.from_pretrained(snake_case_ ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
UpperCamelCase__ = [TFBertTokenizer.from_pretrained(snake_case_ ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(snake_case_ , use_fast_bert_tokenizer=snake_case_ )
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
UpperCamelCase__ = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
UpperCamelCase__ = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in (self.test_sentences, self.paired_sentences):
UpperCamelCase__ = tokenizer(snake_case_ , return_tensors='tf' , padding='longest' )
UpperCamelCase__ = tf_tokenizer(snake_case_ )
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
for tf_tokenizer in self.tf_tokenizers:
UpperCamelCase__ = tf_tokenizer(self.paired_sentences )
UpperCamelCase__ = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
for tf_tokenizer in self.tf_tokenizers:
UpperCamelCase__ = tf.function(snake_case_ )
for test_inputs in (self.test_sentences, self.paired_sentences):
UpperCamelCase__ = tf.constant(snake_case_ )
UpperCamelCase__ = compiled_tokenizer(snake_case_ )
UpperCamelCase__ = tf_tokenizer(snake_case_ )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
for tf_tokenizer in self.tf_tokenizers:
UpperCamelCase__ = ModelToSave(tokenizer=snake_case_ )
UpperCamelCase__ = tf.convert_to_tensor(self.test_sentences )
UpperCamelCase__ = model(snake_case_ ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
UpperCamelCase__ = Path(snake_case_ ) / 'saved.model'
model.save(snake_case_ )
UpperCamelCase__ = tf.keras.models.load_model(snake_case_ )
UpperCamelCase__ = loaded_model(snake_case_ )
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1E-5 )
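# Illustrative sketch: the point of TFBertTokenizer is that tokenization runs
# inside the TensorFlow graph, so an exported SavedModel can accept raw strings:
#
#   tokenizer = TFBertTokenizer.from_pretrained("bert-base-uncased")
#   batch = tokenizer(tf.constant(["hello world"]))  # dict of integer tensors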
| 20 |
"""simple docstring"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> list[int]:
"""simple docstring"""
UpperCamelCase__ = len(SCREAMING_SNAKE_CASE )
for i in range(SCREAMING_SNAKE_CASE ):
for j in range(i + 1 , SCREAMING_SNAKE_CASE ):
if numbers[j] < numbers[i]:
UpperCamelCase__ , UpperCamelCase__ = numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
A__ : Union[str, Any]= input("""Enter numbers separated by a comma:\n""").strip()
A__ : List[Any]= [int(item) for item in user_input.split(""",""")]
print(exchange_sort(unsorted))
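# Note: exchange sort compares every ordered pair (i, j) with i < j and swaps on
# inversion, so it always performs n * (n - 1) / 2 comparisons, i.e. O(n^2) work
# regardless of input order; e.g. exchange_sort([5, 4, 3, 2, 1]) -> [1, 2, 3, 4, 5].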
| 20 | 1 |
"""simple docstring"""
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
A__ : Optional[int]= logging.get_logger(__name__)
@add_end_docstrings(_a )
class __lowerCamelCase ( _a ):
def __init__( self , *snake_case_ , **snake_case_ ) -> str:
super().__init__(*snake_case_ , **snake_case_ )
self.check_model_type(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_=None , snake_case_=None , snake_case_=None , **snake_case_ ) -> Dict:
UpperCamelCase__ , UpperCamelCase__ = {}, {}
if padding is not None:
UpperCamelCase__ = padding
if truncation is not None:
UpperCamelCase__ = truncation
if top_k is not None:
UpperCamelCase__ = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , snake_case_ , snake_case_ = None , **snake_case_ ) -> Any:
if isinstance(snake_case_ , (Image.Image, str) ) and isinstance(snake_case_ , snake_case_ ):
UpperCamelCase__ = {'image': image, 'question': question}
else:
UpperCamelCase__ = image
UpperCamelCase__ = super().__call__(snake_case_ , **snake_case_ )
return results
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_=False , snake_case_=False ) -> Tuple:
UpperCamelCase__ = load_image(inputs['image'] )
UpperCamelCase__ = self.tokenizer(
inputs['question'] , return_tensors=self.framework , padding=snake_case_ , truncation=snake_case_ )
UpperCamelCase__ = self.image_processor(images=snake_case_ , return_tensors=self.framework )
model_inputs.update(snake_case_ )
return model_inputs
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> List[str]:
UpperCamelCase__ = self.model(**snake_case_ )
return model_outputs
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_=5 ) -> int:
if top_k > self.model.config.num_labels:
UpperCamelCase__ = self.model.config.num_labels
if self.framework == "pt":
UpperCamelCase__ = model_outputs.logits.sigmoid()[0]
UpperCamelCase__ , UpperCamelCase__ = probs.topk(snake_case_ )
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
UpperCamelCase__ = scores.tolist()
UpperCamelCase__ = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(snake_case_ , snake_case_ )]
| 20 |
"""simple docstring"""
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
A__ : str= {
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
A__ : str= {
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
UpperCamelCase__ = (images / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase__ = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
UpperCamelCase__ = numpy_to_pil(SCREAMING_SNAKE_CASE )
return images
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
if images.ndim == 3:
UpperCamelCase__ = images[None, ...]
UpperCamelCase__ = (images * 2_55).round().astype('uint8' )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
UpperCamelCase__ = [Image.fromarray(image.squeeze() , mode='L' ) for image in images]
else:
UpperCamelCase__ = [Image.fromarray(SCREAMING_SNAKE_CASE ) for image in images]
return pil_images
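# Math note: diffusion models emit images in [-1, 1]; images / 2 + 0.5 maps that
# range onto [0, 1], and the second function then scales by 255 and rounds to
# uint8 before building the PIL images.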
| 20 | 1 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
A__ : List[str]= logging.getLogger(__name__)
@dataclass
class __lowerCamelCase :
a : str
a : List[str]
a : Optional[List[str]]
@dataclass
class __lowerCamelCase :
a : List[int]
a : List[int]
a : Optional[List[int]] =None
a : Optional[List[int]] =None
class __lowerCamelCase ( _a ):
a : Optional[Any] ="""train"""
a : List[Any] ="""dev"""
a : Optional[Any] ="""test"""
class __lowerCamelCase :
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case_ , snake_case_ ) -> List[InputExample]:
raise NotImplementedError
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> List[str]:
raise NotImplementedError
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_=False , snake_case_="[CLS]" , snake_case_=1 , snake_case_="[SEP]" , snake_case_=False , snake_case_=False , snake_case_=0 , snake_case_=0 , snake_case_=-100 , snake_case_=0 , snake_case_=True , ) -> List[InputFeatures]:
UpperCamelCase__ = {label: i for i, label in enumerate(snake_case_ )}
UpperCamelCase__ = []
for ex_index, example in enumerate(snake_case_ ):
if ex_index % 1_0000 == 0:
logger.info('Writing example %d of %d' , snake_case_ , len(snake_case_ ) )
UpperCamelCase__ = []
UpperCamelCase__ = []
for word, label in zip(example.words , example.labels ):
UpperCamelCase__ = tokenizer.tokenize(snake_case_ )
# bert-base-multilingual-cased sometimes outputs "nothing" ([]) when tokenizing just a space.
if len(snake_case_ ) > 0:
tokens.extend(snake_case_ )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(snake_case_ ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
UpperCamelCase__ = tokenizer.num_special_tokens_to_add()
if len(snake_case_ ) > max_seq_length - special_tokens_count:
UpperCamelCase__ = tokens[: (max_seq_length - special_tokens_count)]
UpperCamelCase__ = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
UpperCamelCase__ = [sequence_a_segment_id] * len(snake_case_ )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
UpperCamelCase__ = [cls_token] + tokens
UpperCamelCase__ = [pad_token_label_id] + label_ids
UpperCamelCase__ = [cls_token_segment_id] + segment_ids
UpperCamelCase__ = tokenizer.convert_tokens_to_ids(snake_case_ )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
UpperCamelCase__ = [1 if mask_padding_with_zero else 0] * len(snake_case_ )
# Zero-pad up to the sequence length.
UpperCamelCase__ = max_seq_length - len(snake_case_ )
if pad_on_left:
UpperCamelCase__ = ([pad_token] * padding_length) + input_ids
UpperCamelCase__ = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
UpperCamelCase__ = ([pad_token_segment_id] * padding_length) + segment_ids
UpperCamelCase__ = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(snake_case_ ) == max_seq_length
assert len(snake_case_ ) == max_seq_length
assert len(snake_case_ ) == max_seq_length
assert len(snake_case_ ) == max_seq_length
if ex_index < 5:
logger.info('*** Example ***' )
logger.info('guid: %s' , example.guid )
logger.info('tokens: %s' , ' '.join([str(snake_case_ ) for x in tokens] ) )
logger.info('input_ids: %s' , ' '.join([str(snake_case_ ) for x in input_ids] ) )
logger.info('input_mask: %s' , ' '.join([str(snake_case_ ) for x in input_mask] ) )
logger.info('segment_ids: %s' , ' '.join([str(snake_case_ ) for x in segment_ids] ) )
logger.info('label_ids: %s' , ' '.join([str(snake_case_ ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
UpperCamelCase__ = None
features.append(
InputFeatures(
input_ids=snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , label_ids=snake_case_ ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class __lowerCamelCase ( _a ):
a : List[InputFeatures]
a : int =nn.CrossEntropyLoss().ignore_index
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = None , snake_case_=False , snake_case_ = Split.train , ) -> Tuple:
# Load data features from cache or dataset file
UpperCamelCase__ = os.path.join(
snake_case_ , 'cached_{}_{}_{}'.format(mode.value , tokenizer.__class__.__name__ , str(snake_case_ ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
UpperCamelCase__ = cached_features_file + '.lock'
with FileLock(snake_case_ ):
if os.path.exists(snake_case_ ) and not overwrite_cache:
logger.info(F'Loading features from cached file {cached_features_file}' )
UpperCamelCase__ = torch.load(snake_case_ )
else:
logger.info(F'Creating features from dataset file at {data_dir}' )
UpperCamelCase__ = token_classification_task.read_examples_from_file(snake_case_ , snake_case_ )
# TODO clean up all this to leverage built-in features of tokenizers
UpperCamelCase__ = token_classification_task.convert_examples_to_features(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , cls_token_at_end=bool(model_type in ['xlnet'] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['xlnet'] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=snake_case_ , pad_on_left=bool(tokenizer.padding_side == 'left' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(F'Saving features into cached file {cached_features_file}' )
torch.save(self.features , snake_case_ )
def __len__( self ) -> int:
return len(self.features )
def __getitem__( self , snake_case_ ) -> InputFeatures:
return self.features[i]
if is_tf_available():
import tensorflow as tf
class __lowerCamelCase :
a : List[InputFeatures]
a : int =-1_0_0
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = None , snake_case_=False , snake_case_ = Split.train , ) -> List[Any]:
UpperCamelCase__ = token_classification_task.read_examples_from_file(snake_case_ , snake_case_ )
# TODO clean up all this to leverage built-in features of tokenizers
UpperCamelCase__ = token_classification_task.convert_examples_to_features(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , cls_token_at_end=bool(model_type in ['xlnet'] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['xlnet'] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=snake_case_ , pad_on_left=bool(tokenizer.padding_side == 'left' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
UpperCamelCase__ = tf.data.Dataset.from_generator(
snake_case_ , ({'input_ids': tf.intaa, 'attention_mask': tf.intaa}, tf.intaa) , (
{'input_ids': tf.TensorShape([None] ), 'attention_mask': tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
UpperCamelCase__ = tf.data.Dataset.from_generator(
snake_case_ , ({'input_ids': tf.intaa, 'attention_mask': tf.intaa, 'token_type_ids': tf.intaa}, tf.intaa) , (
{
'input_ids': tf.TensorShape([None] ),
'attention_mask': tf.TensorShape([None] ),
'token_type_ids': tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self ) -> List[Any]:
return len(self.features )
def __getitem__( self , snake_case_ ) -> InputFeatures:
return self.features[i]
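# Note: padding and non-initial sub-word positions receive pad_token_label_id,
# which for the torch dataset is nn.CrossEntropyLoss().ignore_index (-100), so
# those positions are excluded from the token-classification loss and only the
# first sub-token of each word carries a real label.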
| 20 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
A__ : Dict= logging.get_logger(__name__)
A__ : str= {"""vocab_file""": """spiece.model"""}
A__ : Union[str, Any]= {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
}
}
# TODO(PVP) - this should be removed in Transformers v5
A__ : Union[str, Any]= {
"""t5-small""": 5_12,
"""t5-base""": 5_12,
"""t5-large""": 5_12,
"""t5-3b""": 5_12,
"""t5-11b""": 5_12,
}
A__ : Optional[Any]= """▁"""
class __lowerCamelCase ( _a ):
a : Dict =VOCAB_FILES_NAMES
a : str =PRETRAINED_VOCAB_FILES_MAP
a : Union[str, Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : List[str] =["""input_ids""", """attention_mask"""]
def __init__( self , snake_case_ , snake_case_="</s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_=100 , snake_case_=None , snake_case_ = None , snake_case_=True , **snake_case_ , ) -> None:
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
UpperCamelCase__ = [F'<extra_id_{i}>' for i in range(snake_case_ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
UpperCamelCase__ = len(set(filter(lambda snake_case_ : bool('extra_id' in str(snake_case_ ) ) , snake_case_ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
' tokens' )
if legacy:
logger.warning_once(
F'You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'
' read the related pull request available at https://github.com/huggingface/transformers/pull/24565' )
UpperCamelCase__ = legacy
UpperCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , extra_ids=snake_case_ , additional_special_tokens=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , legacy=snake_case_ , **snake_case_ , )
UpperCamelCase__ = vocab_file
UpperCamelCase__ = extra_ids
UpperCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case_ )
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case_ , snake_case_ , snake_case_ ) -> Optional[Any]:
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
UpperCamelCase__ = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'This tokenizer was incorrectly instantiated with a model max length of'
F' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
F' {pretrained_model_name_or_path} automatically truncating your input to'
F' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
F' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
' instantiate this tokenizer with `model_max_length` set to your preferred value.' , snake_case_ , )
return max_model_length
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
return self.sp_model.get_piece_size() + self._extra_ids
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = {self.convert_ids_to_tokens(snake_case_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None , snake_case_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(snake_case_ )) + [1]
return ([0] * len(snake_case_ )) + [1] + ([0] * len(snake_case_ )) + [1]
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
return list(
set(filter(lambda snake_case_ : bool(re.search(r'<extra_id_\d+>' , snake_case_ ) ) is not None , self.additional_special_tokens ) ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
return [self._convert_token_to_id(snake_case_ ) for token in self.get_sentinel_tokens()]
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> List[int]:
if len(snake_case_ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
' eos tokens being added.' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None ) -> List[int]:
UpperCamelCase__ = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None ) -> List[int]:
UpperCamelCase__ = self._add_eos_if_not_present(snake_case_ )
if token_ids_a is None:
return token_ids_a
else:
UpperCamelCase__ = self._add_eos_if_not_present(snake_case_ )
return token_ids_a + token_ids_a
def __getstate__( self ) -> str:
UpperCamelCase__ = self.__dict__.copy()
UpperCamelCase__ = None
return state
def __setstate__( self , snake_case_ ) -> Any:
UpperCamelCase__ = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
UpperCamelCase__ = {}
UpperCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , **snake_case_ ) -> List[str]:
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
UpperCamelCase__ = SPIECE_UNDERLINE + text.replace(snake_case_ , ' ' )
return super().tokenize(snake_case_ , **snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , **snake_case_ ) -> List[Any]:
if not self.legacy:
UpperCamelCase__ = text.startswith(snake_case_ )
if is_first:
UpperCamelCase__ = text[1:]
UpperCamelCase__ = self.sp_model.encode(snake_case_ , out_type=snake_case_ )
if not self.legacy and not is_first and not text.startswith(' ' ) and tokens[0].startswith(snake_case_ ):
UpperCamelCase__ = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> List[str]:
if token.startswith('<extra_id_' ):
UpperCamelCase__ = re.match(r'<extra_id_(\d+)>' , snake_case_ )
UpperCamelCase__ = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Optional[int]:
if index < self.sp_model.get_piece_size():
UpperCamelCase__ = self.sp_model.IdToPiece(snake_case_ )
else:
UpperCamelCase__ = F'<extra_id_{self.vocab_size - 1 - index}>'
return token
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> List[str]:
UpperCamelCase__ = []
UpperCamelCase__ = ''
UpperCamelCase__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(snake_case_ ) + token
UpperCamelCase__ = True
UpperCamelCase__ = []
else:
current_sub_tokens.append(snake_case_ )
UpperCamelCase__ = False
out_string += self.sp_model.decode(snake_case_ )
return out_string.strip()
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]:
if not os.path.isdir(snake_case_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
UpperCamelCase__ = os.path.join(
snake_case_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case_ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case_ , 'wb' ) as fi:
UpperCamelCase__ = self.sp_model.serialized_model_proto()
fi.write(snake_case_ )
return (out_vocab_file,)
| 20 | 1 |
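# The record above ends a T5-style SentencePiece tokenizer: sentinel tokens
# "<extra_id_N>" are mapped onto ids counted down from the end of the
# vocabulary, so sentinel N lives at id vocab_size - N - 1. A minimal sketch
# of that arithmetic; the vocab size of 32100 is a hypothetical stand-in for
# the value the real tokenizer reads from its SentencePiece model.
import re

VOCAB_SIZE = 32100  # hypothetical

def sentinel_token_to_id(token: str, vocab_size: int = VOCAB_SIZE) -> int:
    match = re.match(r"<extra_id_(\d+)>", token)
    assert match is not None, "not a sentinel token"
    return vocab_size - int(match.group(1)) - 1

def sentinel_id_to_token(index: int, vocab_size: int = VOCAB_SIZE) -> str:
    return f"<extra_id_{vocab_size - 1 - index}>"

assert sentinel_token_to_id("<extra_id_0>") == 32099
assert sentinel_id_to_token(32099) == "<extra_id_0>"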
"""simple docstring"""
from __future__ import annotations
from typing import Any
class __lowerCamelCase :
def __init__( self , snake_case_ ) -> None:
UpperCamelCase__ = num_of_nodes
UpperCamelCase__ = []
UpperCamelCase__ = {}
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> None:
self.m_edges.append([u_node, v_node, weight] )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> int:
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> None:
if self.m_component[u_node] != u_node:
for k in self.m_component:
UpperCamelCase__ = self.find_component(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> None:
if component_size[u_node] <= component_size[v_node]:
UpperCamelCase__ = v_node
component_size[v_node] += component_size[u_node]
self.set_component(snake_case_ )
elif component_size[u_node] >= component_size[v_node]:
UpperCamelCase__ = self.find_component(snake_case_ )
component_size[u_node] += component_size[v_node]
self.set_component(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> None:
UpperCamelCase__ = []
UpperCamelCase__ = 0
UpperCamelCase__ = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
UpperCamelCase__ = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = edge
UpperCamelCase__ = self.m_component[u]
UpperCamelCase__ = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
UpperCamelCase__ = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(snake_case_ , snake_case_ ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = edge
UpperCamelCase__ = self.m_component[u]
UpperCamelCase__ = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(snake_case_ , snake_case_ , snake_case_ )
print(F'Added edge [{u} - {v}]\nAdded weight: {w}\n' )
num_of_components -= 1
UpperCamelCase__ = [-1] * self.m_num_of_nodes
print(F'The total weight of the minimal spanning tree is: {mst_weight}' )
def lowerCAmelCase_( ) -> None:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 |
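# Usage sketch for the Boruvka Graph record above: build a small weighted
# graph and run boruvka(). For these edges the MST keeps 0-1, 1-2 and 2-3,
# so the printed total weight is 1 + 2 + 3 = 6.
g = Graph(4)
g.add_edge(0, 1, 1)
g.add_edge(1, 2, 2)
g.add_edge(2, 3, 3)
g.add_edge(0, 3, 10)
g.boruvka()  # prints each added edge, then "The total weight ... is: 6"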
"""simple docstring"""
from __future__ import annotations
from typing import Generic, TypeVar
A__ : Any= TypeVar("""T""")
class __lowerCamelCase ( Generic[T] ):
def __init__( self , snake_case_ ) -> None:
UpperCamelCase__ = data
UpperCamelCase__ = self
UpperCamelCase__ = 0
class __lowerCamelCase ( Generic[T] ):
def __init__( self ) -> None:
# map from node name to the node object
UpperCamelCase__ = {}
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> None:
# create a new set with x as its member
UpperCamelCase__ = DisjointSetTreeNode(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> DisjointSetTreeNode[T]:
# find the set x belongs to (with path-compression)
UpperCamelCase__ = self.map[data]
if elem_ref != elem_ref.parent:
UpperCamelCase__ = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> None:
# helper function for union operation
if nodea.rank > nodea.rank:
UpperCamelCase__ = nodea
else:
UpperCamelCase__ = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> None:
# merge 2 disjoint sets
self.link(self.find_set(snake_case_ ) , self.find_set(snake_case_ ) )
class __lowerCamelCase ( Generic[T] ):
def __init__( self ) -> None:
# connections: map from the node to the neighbouring nodes (with weights)
UpperCamelCase__ = {}
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> None:
# add a node ONLY if its not present in the graph
if node not in self.connections:
UpperCamelCase__ = {}
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> None:
# add an edge with the given weight
self.add_node(snake_case_ )
self.add_node(snake_case_ )
UpperCamelCase__ = weight
UpperCamelCase__ = weight
def SCREAMING_SNAKE_CASE__ ( self ) -> GraphUndirectedWeighted[T]:
UpperCamelCase__ = []
UpperCamelCase__ = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
edges.sort(key=lambda snake_case_ : x[2] )
# creating the disjoint set
UpperCamelCase__ = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(snake_case_ )
# MST generation
UpperCamelCase__ = 0
UpperCamelCase__ = 0
UpperCamelCase__ = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = edges[index]
index += 1
UpperCamelCase__ = disjoint_set.find_set(snake_case_ )
UpperCamelCase__ = disjoint_set.find_set(snake_case_ )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(snake_case_ , snake_case_ , snake_case_ )
disjoint_set.union(snake_case_ , snake_case_ )
return graph
| 20 | 1 |
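# Usage sketch for GraphUndirectedWeighted above: the heavy 1-3 edge is
# rejected by the disjoint set, leaving an MST of weight 1 + 2.
g = GraphUndirectedWeighted[int]()
g.add_edge(1, 2, 1)
g.add_edge(2, 3, 2)
g.add_edge(1, 3, 10)
mst = g.kruskal()
for start in mst.connections:
    for end, weight in mst.connections[start].items():
        print(start, "-", end, ":", weight)  # each MST edge appears twice (undirected)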
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __lowerCamelCase ( metaclass=_a ):
a : Tuple =["""flax"""]
def __init__( self , *snake_case_ , **snake_case_ ) -> Dict:
requires_backends(self , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case_ , **snake_case_ ) -> List[Any]:
requires_backends(cls , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case_ , **snake_case_ ) -> Any:
requires_backends(cls , ['flax'] )
class __lowerCamelCase ( metaclass=_a ):
a : Union[str, Any] =["""flax"""]
def __init__( self , *snake_case_ , **snake_case_ ) -> Union[str, Any]:
requires_backends(self , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case_ , **snake_case_ ) -> Optional[Any]:
requires_backends(cls , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case_ , **snake_case_ ) -> int:
requires_backends(cls , ['flax'] )
class __lowerCamelCase ( metaclass=_a ):
a : Optional[int] =["""flax"""]
def __init__( self , *snake_case_ , **snake_case_ ) -> Optional[int]:
requires_backends(self , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case_ , **snake_case_ ) -> List[str]:
requires_backends(cls , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case_ , **snake_case_ ) -> int:
requires_backends(cls , ['flax'] )
class __lowerCamelCase ( metaclass=_a ):
a : List[str] =["""flax"""]
def __init__( self , *snake_case_ , **snake_case_ ) -> Optional[Any]:
requires_backends(self , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case_ , **snake_case_ ) -> List[str]:
requires_backends(cls , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case_ , **snake_case_ ) -> Optional[Any]:
requires_backends(cls , ['flax'] )
class __lowerCamelCase ( metaclass=_a ):
a : List[Any] =["""flax"""]
def __init__( self , *snake_case_ , **snake_case_ ) -> List[str]:
requires_backends(self , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case_ , **snake_case_ ) -> Optional[Any]:
requires_backends(cls , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case_ , **snake_case_ ) -> Optional[int]:
requires_backends(cls , ['flax'] )
class __lowerCamelCase ( metaclass=_a ):
a : Any =["""flax"""]
def __init__( self , *snake_case_ , **snake_case_ ) -> Optional[Any]:
requires_backends(self , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case_ , **snake_case_ ) -> Tuple:
requires_backends(cls , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case_ , **snake_case_ ) -> List[Any]:
requires_backends(cls , ['flax'] )
class __lowerCamelCase ( metaclass=_a ):
a : Any =["""flax"""]
def __init__( self , *snake_case_ , **snake_case_ ) -> int:
requires_backends(self , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case_ , **snake_case_ ) -> Tuple:
requires_backends(cls , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case_ , **snake_case_ ) -> str:
requires_backends(cls , ['flax'] )
class __lowerCamelCase ( metaclass=_a ):
a : int =["""flax"""]
def __init__( self , *snake_case_ , **snake_case_ ) -> Union[str, Any]:
requires_backends(self , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case_ , **snake_case_ ) -> Any:
requires_backends(cls , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case_ , **snake_case_ ) -> Tuple:
requires_backends(cls , ['flax'] )
class __lowerCamelCase ( metaclass=_a ):
a : str =["""flax"""]
def __init__( self , *snake_case_ , **snake_case_ ) -> Dict:
requires_backends(self , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case_ , **snake_case_ ) -> str:
requires_backends(cls , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case_ , **snake_case_ ) -> Dict:
requires_backends(cls , ['flax'] )
class __lowerCamelCase ( metaclass=_a ):
a : Any =["""flax"""]
def __init__( self , *snake_case_ , **snake_case_ ) -> Union[str, Any]:
requires_backends(self , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case_ , **snake_case_ ) -> Dict:
requires_backends(cls , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case_ , **snake_case_ ) -> Dict:
requires_backends(cls , ['flax'] )
class __lowerCamelCase ( metaclass=_a ):
a : Any =["""flax"""]
def __init__( self , *snake_case_ , **snake_case_ ) -> str:
requires_backends(self , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case_ , **snake_case_ ) -> str:
requires_backends(cls , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case_ , **snake_case_ ) -> Optional[Any]:
requires_backends(cls , ['flax'] )
class __lowerCamelCase ( metaclass=_a ):
a : List[str] =["""flax"""]
def __init__( self , *snake_case_ , **snake_case_ ) -> Optional[int]:
requires_backends(self , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case_ , **snake_case_ ) -> Dict:
requires_backends(cls , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case_ , **snake_case_ ) -> Tuple:
requires_backends(cls , ['flax'] )
class __lowerCamelCase ( metaclass=_a ):
a : int =["""flax"""]
def __init__( self , *snake_case_ , **snake_case_ ) -> Union[str, Any]:
requires_backends(self , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case_ , **snake_case_ ) -> List[Any]:
requires_backends(cls , ['flax'] )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , *snake_case_ , **snake_case_ ) -> List[Any]:
requires_backends(cls , ['flax'] )
| 20 |
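# The record above is a generated dummy-objects file: every class is a
# placeholder that raises as soon as it is used without the flax backend
# installed. The pattern is small; the sketch below is a simplified stand-in
# (this requires_backends always raises, whereas the real helper first checks
# that each backend is importable; the pipeline class name is hypothetical).
def requires_backends(obj, backends):
    name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
    raise ImportError(f"{name} requires the following backends: {backends}")

class DummyObject(type):
    """Metaclass: classmethod-style access (e.g. from_pretrained) also raises."""
    def __getattr__(cls, key):
        requires_backends(cls, cls._backends)

class FlaxExamplePipeline(metaclass=DummyObject):  # hypothetical class name
    _backends = ["flax"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, self._backends)

# Both lines below raise ImportError pointing the user at the flax backend:
# FlaxExamplePipeline()
# FlaxExamplePipeline.from_pretrained("some/checkpoint")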
"""simple docstring"""
A__ : Tuple= """Alexander Joslin"""
import operator as op
from .stack import Stack
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
UpperCamelCase__ = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
UpperCamelCase__ = Stack()
UpperCamelCase__ = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(SCREAMING_SNAKE_CASE ) )
elif i in operators:
# RULE 2
operator_stack.push(SCREAMING_SNAKE_CASE )
elif i == ")":
# RULE 4
UpperCamelCase__ = operator_stack.peek()
operator_stack.pop()
UpperCamelCase__ = operand_stack.peek()
operand_stack.pop()
UpperCamelCase__ = operand_stack.peek()
operand_stack.pop()
UpperCamelCase__ = operators[opr](SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
operand_stack.push(SCREAMING_SNAKE_CASE )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
A__ : int= """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 20 | 1 |
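# Condensed trace of the sample input "(5 + ((4 * 2) * (2 + 3)))":
#   push 5, +, 4, *, 2      operands [5, 4, 2]      operators [+, *]
#   ")" -> 4 * 2 = 8        operands [5, 8]         operators [+]
#   push *, 2, +, 3         operands [5, 8, 2, 3]   operators [+, *, +]
#   ")" -> 2 + 3 = 5        operands [5, 8, 5]      operators [+, *]
#   ")" -> 8 * 5 = 40       operands [5, 40]        operators [+]
#   ")" -> 5 + 40 = 45      operands [45]
# Note: digits are read one character at a time, so multi-digit operands
# would need a tokenizer in front of this routine.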
"""simple docstring"""
from importlib import import_module
from .logging import get_logger
A__ : List[Any]= get_logger(__name__)
class __lowerCamelCase :
def __init__( self , snake_case_ , snake_case_=None ) -> List[str]:
UpperCamelCase__ = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith('__' ):
setattr(self , snake_case_ , getattr(snake_case_ , snake_case_ ) )
UpperCamelCase__ = module._original_module if isinstance(snake_case_ , _PatchedModuleObj ) else module
class __lowerCamelCase :
a : Any =[]
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=None ) -> List[Any]:
UpperCamelCase__ = obj
UpperCamelCase__ = target
UpperCamelCase__ = new
UpperCamelCase__ = target.split('.' )[0]
UpperCamelCase__ = {}
UpperCamelCase__ = attrs or []
def __enter__( self ) -> Optional[int]:
*UpperCamelCase__ , UpperCamelCase__ = self.target.split('.' )
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(snake_case_ ) ):
try:
UpperCamelCase__ = import_module('.'.join(submodules[: i + 1] ) )
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
UpperCamelCase__ = getattr(self.obj , snake_case_ )
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(snake_case_ , _PatchedModuleObj ) and obj_attr._original_module is submodule)
):
UpperCamelCase__ = obj_attr
# patch at top level
setattr(self.obj , snake_case_ , _PatchedModuleObj(snake_case_ , attrs=self.attrs ) )
UpperCamelCase__ = getattr(self.obj , snake_case_ )
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(snake_case_ , snake_case_ , _PatchedModuleObj(getattr(snake_case_ , snake_case_ , snake_case_ ) , attrs=self.attrs ) )
UpperCamelCase__ = getattr(snake_case_ , snake_case_ )
# finally set the target attribute
setattr(snake_case_ , snake_case_ , self.new )
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
UpperCamelCase__ = getattr(import_module('.'.join(snake_case_ ) ) , snake_case_ )
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , snake_case_ ) is attr_value:
UpperCamelCase__ = getattr(self.obj , snake_case_ )
setattr(self.obj , snake_case_ , self.new )
elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
UpperCamelCase__ = globals()['__builtins__'][target_attr]
setattr(self.obj , snake_case_ , self.new )
else:
raise RuntimeError(F'Tried to patch attribute {target_attr} instead of a submodule.' )
def __exit__( self , *snake_case_ ) -> str:
for attr in list(self.original ):
setattr(self.obj , snake_case_ , self.original.pop(snake_case_ ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
self.__enter__()
self._active_patches.append(self )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
| 20 |
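# The record above is the datasets-style `patch_submodule` context manager
# (names masked by the style transform): given an object and a dotted target
# like "os.path.join", it patches the attribute along the submodule chain and
# any direct imports found in the object's globals, restoring both on exit.
# Usage sketch; `patch_submodule` is the assumed unmasked name, and the module
# under test is faked with types.ModuleType so the example is self-contained.
import os
import types

my_module = types.ModuleType("my_module")  # stand-in for a real module
my_module.os = os                          # as if my_module did `import os`

def fake_join(*parts):
    return "|".join(parts)

with patch_submodule(my_module, "os.path.join", fake_join):
    assert my_module.os.path.join("a", "b") == "a|b"   # patched inside the block
assert my_module.os.path.join("a", "b") == os.path.join("a", "b")  # restored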
"""simple docstring"""
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
A__ : Any= """src/diffusers"""
# Matches is_xxx_available()
A__ : Tuple= re.compile(r"""is\_([a-z_]*)_available\(\)""")
# Matches from xxx import bla
A__ : Any= re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
A__ : Optional[Any]= """
{0} = None
"""
A__ : List[Any]= """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
"""
A__ : Dict= """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ = _re_backend.findall(SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) == 0:
return None
return "_and_".join(SCREAMING_SNAKE_CASE )
def lowerCAmelCase_( ) -> str:
"""simple docstring"""
with open(os.path.join(SCREAMING_SNAKE_CASE , '__init__.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
UpperCamelCase__ = f.readlines()
# Get to the point we do the actual imports for type checking
UpperCamelCase__ = 0
UpperCamelCase__ = {}
# Go through the end of the file
while line_index < len(SCREAMING_SNAKE_CASE ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
UpperCamelCase__ = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith('else:' ):
line_index += 1
line_index += 1
UpperCamelCase__ = []
# Until we unindent, add backend objects to the list
while line_index < len(SCREAMING_SNAKE_CASE ) and len(lines[line_index] ) > 1:
UpperCamelCase__ = lines[line_index]
UpperCamelCase__ = _re_single_line_import.search(SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(SCREAMING_SNAKE_CASE ) > 0:
UpperCamelCase__ = objects
else:
line_index += 1
return backend_specific_objects
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
if name.isupper():
return DUMMY_CONSTANT.format(SCREAMING_SNAKE_CASE )
elif name.islower():
return DUMMY_FUNCTION.format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
return DUMMY_CLASS.format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE=None ) -> int:
"""simple docstring"""
if backend_specific_objects is None:
UpperCamelCase__ = read_init()
# For special correspondence backend to module name as used in the function requires_modulename
UpperCamelCase__ = {}
for backend, objects in backend_specific_objects.items():
UpperCamelCase__ = '[' + ', '.join(F'"{b}"' for b in backend.split('_and_' ) ) + ']'
UpperCamelCase__ = '# This file is autogenerated by the command `make fix-copies`, do not edit.\n'
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for o in objects] )
UpperCamelCase__ = dummy_file
return dummy_files
def lowerCAmelCase_( SCREAMING_SNAKE_CASE=False ) -> int:
"""simple docstring"""
UpperCamelCase__ = create_dummy_files()
# For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
UpperCamelCase__ = {'torch': 'pt'}
# Locate actual dummy modules and read their content.
UpperCamelCase__ = os.path.join(SCREAMING_SNAKE_CASE , 'utils' )
UpperCamelCase__ = {
backend: os.path.join(SCREAMING_SNAKE_CASE , F'dummy_{short_names.get(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}_objects.py' )
for backend in dummy_files.keys()
}
UpperCamelCase__ = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(SCREAMING_SNAKE_CASE ):
with open(SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' , newline='\n' ) as f:
UpperCamelCase__ = f.read()
else:
UpperCamelCase__ = ''
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F'Updating diffusers.utils.dummy_{short_names.get(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}_objects.py as the main '
'__init__ has new objects.' )
with open(dummy_file_paths[backend] , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
'The main __init__ has objects that are not present in '
F'diffusers.utils.dummy_{short_names.get(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}_objects.py. Run `make fix-copies` '
'to fix this.' )
if __name__ == "__main__":
A__ : Any= argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
A__ : Optional[int]= parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 20 | 1 |
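# The record above is the diffusers check_dummies utility: it scans
# __init__.py for `is_xxx_available()` guards, collects the objects each
# backend gates, and regenerates the dummy_*_objects.py files from the string
# templates. The backend-detection regex is the heart of it; a small sketch of
# how it behaves, reusing the same pattern as the record:
import re

_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")

def find_backend(line: str):
    backends = _re_backend.findall(line)
    return "_and_".join(backends) if backends else None

# One guard -> one backend key; several guards on a line -> a compound key.
assert find_backend("if not is_torch_available():") == "torch"
assert find_backend("if is_torch_available() and is_scipy_available():") == "torch_and_scipy"
assert find_backend("import os") is None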
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
A__ : Optional[int]= logging.get_logger(__name__)
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> List[List[ImageInput]]:
"""simple docstring"""
if isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(SCREAMING_SNAKE_CASE ):
return [[videos]]
raise ValueError(F'Could not make batched video from {videos}' )
class __lowerCamelCase ( _a ):
a : Tuple =["""pixel_values"""]
def __init__( self , snake_case_ = True , snake_case_ = None , snake_case_ = PILImageResampling.BILINEAR , snake_case_ = True , snake_case_ = None , snake_case_ = True , snake_case_ = 1 / 255 , snake_case_ = True , snake_case_ = True , snake_case_ = None , snake_case_ = None , **snake_case_ , ) -> None:
super().__init__(**snake_case_ )
UpperCamelCase__ = size if size is not None else {'shortest_edge': 256}
UpperCamelCase__ = get_size_dict(snake_case_ , default_to_square=snake_case_ )
UpperCamelCase__ = crop_size if crop_size is not None else {'height': 224, 'width': 224}
UpperCamelCase__ = get_size_dict(snake_case_ , param_name='crop_size' )
UpperCamelCase__ = do_resize
UpperCamelCase__ = size
UpperCamelCase__ = do_center_crop
UpperCamelCase__ = crop_size
UpperCamelCase__ = resample
UpperCamelCase__ = do_rescale
UpperCamelCase__ = rescale_factor
UpperCamelCase__ = offset
UpperCamelCase__ = do_normalize
UpperCamelCase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCamelCase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ = PILImageResampling.BILINEAR , snake_case_ = None , **snake_case_ , ) -> np.ndarray:
UpperCamelCase__ = get_size_dict(snake_case_ , default_to_square=snake_case_ )
if "shortest_edge" in size:
UpperCamelCase__ = get_resize_output_image_size(snake_case_ , size['shortest_edge'] , default_to_square=snake_case_ )
elif "height" in size and "width" in size:
UpperCamelCase__ = (size['height'], size['width'])
else:
raise ValueError(F'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
return resize(snake_case_ , size=snake_case_ , resample=snake_case_ , data_format=snake_case_ , **snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ = None , **snake_case_ , ) -> np.ndarray:
UpperCamelCase__ = get_size_dict(snake_case_ )
if "height" not in size or "width" not in size:
raise ValueError(F'Size must have \'height\' and \'width\' as keys. Got {size.keys()}' )
return center_crop(snake_case_ , size=(size['height'], size['width']) , data_format=snake_case_ , **snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ = True , snake_case_ = None , **snake_case_ , ) -> str:
UpperCamelCase__ = image.astype(np.float32 )
if offset:
UpperCamelCase__ = image - (scale / 2)
return rescale(snake_case_ , scale=snake_case_ , data_format=snake_case_ , **snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ = None , **snake_case_ , ) -> np.ndarray:
return normalize(snake_case_ , mean=snake_case_ , std=snake_case_ , data_format=snake_case_ , **snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = ChannelDimension.FIRST , ) -> np.ndarray:
if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
if offset and not do_rescale:
raise ValueError('For offset, do_rescale must also be set to True.' )
# All transformations expect numpy arrays.
UpperCamelCase__ = to_numpy_array(snake_case_ )
if do_resize:
UpperCamelCase__ = self.resize(image=snake_case_ , size=snake_case_ , resample=snake_case_ )
if do_center_crop:
UpperCamelCase__ = self.center_crop(snake_case_ , size=snake_case_ )
if do_rescale:
UpperCamelCase__ = self.rescale(image=snake_case_ , scale=snake_case_ , offset=snake_case_ )
if do_normalize:
UpperCamelCase__ = self.normalize(image=snake_case_ , mean=snake_case_ , std=snake_case_ )
UpperCamelCase__ = to_channel_dimension_format(snake_case_ , snake_case_ )
return image
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = ChannelDimension.FIRST , **snake_case_ , ) -> PIL.Image.Image:
UpperCamelCase__ = do_resize if do_resize is not None else self.do_resize
UpperCamelCase__ = resample if resample is not None else self.resample
UpperCamelCase__ = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCamelCase__ = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase__ = offset if offset is not None else self.offset
UpperCamelCase__ = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase__ = image_mean if image_mean is not None else self.image_mean
UpperCamelCase__ = image_std if image_std is not None else self.image_std
UpperCamelCase__ = size if size is not None else self.size
UpperCamelCase__ = get_size_dict(snake_case_ , default_to_square=snake_case_ )
UpperCamelCase__ = crop_size if crop_size is not None else self.crop_size
UpperCamelCase__ = get_size_dict(snake_case_ , param_name='crop_size' )
if not valid_images(snake_case_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
UpperCamelCase__ = make_batched(snake_case_ )
UpperCamelCase__ = [
[
self._preprocess_image(
image=snake_case_ , do_resize=snake_case_ , size=snake_case_ , resample=snake_case_ , do_center_crop=snake_case_ , crop_size=snake_case_ , do_rescale=snake_case_ , rescale_factor=snake_case_ , offset=snake_case_ , do_normalize=snake_case_ , image_mean=snake_case_ , image_std=snake_case_ , data_format=snake_case_ , )
for img in video
]
for video in videos
]
UpperCamelCase__ = {'pixel_values': videos}
return BatchFeature(data=snake_case_ , tensor_type=snake_case_ )
| 20 |
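# The record above is a Vivit-style video image processor: make_batched
# coerces raw frames into List[List[image]], then each frame is resized,
# center-cropped, rescaled (with an optional offset) and normalized. Usage
# sketch with random frames; `VivitImageProcessor` is the assumed unmasked
# class name and the output shape is an expectation under the default
# 256 shortest-edge resize and 224x224 crop.
import numpy as np

videos = [
    [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]
    for _ in range(2)
]  # 2 videos x 8 frames, HxWxC uint8

processor = VivitImageProcessor()
batch = processor(videos, return_tensors="np")
print(batch["pixel_values"].shape)  # expected (2, 8, 3, 224, 224)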
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
A__ : Optional[Any]= """Run commands across TPU VMs for initial setup before running `accelerate launch`."""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE=None ) -> Dict:
"""simple docstring"""
if subparsers is not None:
UpperCamelCase__ = subparsers.add_parser('tpu-config' , description=_description )
else:
UpperCamelCase__ = argparse.ArgumentParser('Accelerate tpu-config command' , description=_description )
# Core arguments
UpperCamelCase__ = parser.add_argument_group(
'Config Arguments' , 'Arguments that can be configured through `accelerate config`.' )
config_args.add_argument(
'--config_file' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='Path to the config file to use for accelerate.' , )
config_args.add_argument(
'--tpu_name' , default=SCREAMING_SNAKE_CASE , help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.' , )
config_args.add_argument(
'--tpu_zone' , default=SCREAMING_SNAKE_CASE , help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.' , )
UpperCamelCase__ = parser.add_argument_group('TPU Arguments' , 'Arguments for options ran inside the TPU.' )
pod_args.add_argument(
'--use_alpha' , action='store_true' , help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.' , )
pod_args.add_argument(
'--command_file' , default=SCREAMING_SNAKE_CASE , help='The path to the file containing the commands to run on the pod on startup.' , )
pod_args.add_argument(
'--command' , action='append' , nargs='+' , help='A command to run on the pod. Can be passed multiple times.' , )
pod_args.add_argument(
'--install_accelerate' , action='store_true' , help='Whether to install accelerate on the pod. Defaults to False.' , )
pod_args.add_argument(
'--accelerate_version' , default='latest' , help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.' , )
pod_args.add_argument(
'--debug' , action='store_true' , help='If set, will print the command that would be run instead of running it.' )
if subparsers is not None:
parser.set_defaults(func=SCREAMING_SNAKE_CASE )
return parser
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
UpperCamelCase__ = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
UpperCamelCase__ = defaults.command_file
if not args.command and defaults.commands is not None:
UpperCamelCase__ = defaults.commands
if not args.tpu_name:
UpperCamelCase__ = defaults.tpu_name
if not args.tpu_zone:
UpperCamelCase__ = defaults.tpu_zone
if args.accelerate_version == "dev":
UpperCamelCase__ = 'git+https://github.com/huggingface/accelerate.git'
elif args.accelerate_version == "latest":
UpperCamelCase__ = 'accelerate -U'
elif isinstance(parse(args.accelerate_version ) , SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = F'accelerate=={args.accelerate_version}'
if not args.command_file and not args.command:
raise ValueError('You must specify either a command file or a command to run on the pod.' )
if args.command_file:
with open(args.command_file , 'r' ) as f:
UpperCamelCase__ = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
UpperCamelCase__ = ['cd /usr/share']
if args.install_accelerate:
new_cmd += [F'pip install {args.accelerate_version}']
new_cmd += args.command
UpperCamelCase__ = '; '.join(SCREAMING_SNAKE_CASE )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
UpperCamelCase__ = ['gcloud']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(F'Running {" ".join(SCREAMING_SNAKE_CASE )}' )
return
subprocess.run(SCREAMING_SNAKE_CASE )
print('Successfully setup pod.' )
def lowerCAmelCase_( ) -> int:
"""simple docstring"""
UpperCamelCase__ = tpu_command_parser()
UpperCamelCase__ = parser.parse_args()
tpu_command_launcher(SCREAMING_SNAKE_CASE )
| 20 | 1 |
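# The record above implements `accelerate tpu-config`: CLI flags are merged
# with the saved accelerate config, a `cd`/`pip install` preamble is prepended
# to the user's commands, and the joined string is shipped to every TPU worker
# via gcloud ssh. Sketch of the command it assembles for an invocation like
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central2-b \
#       --install_accelerate --command "python train.py" --debug
# (TPU name, zone and script are placeholders):
new_cmd = "; ".join(["cd /usr/share", "pip install accelerate -U", "python train.py"])
cmd = [
    "gcloud", "compute", "tpus", "tpu-vm", "ssh",
    "my-tpu", "--zone", "us-central2-b",
    "--command", new_cmd,
    "--worker", "all",
]
print(" ".join(cmd))  # with --debug the tool prints this instead of running it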
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
A__ : Tuple= abspath(join(dirname(dirname(dirname(__file__))), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_addoption(parser):
    """simple docstring"""
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    """simple docstring"""
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('--make-reports' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
| 20 |
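# The conftest above wires up transformers' shared pytest options and report
# hooks. Invocation sketch (the flag value is an example): running with
# --make-reports makes pytest_terminal_summary_main dump per-run report files.
#   python -m pytest tests -k tokenizer --make-reports=run_tokenizer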
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : List[str]= logging.get_logger(__name__)
class __lowerCamelCase ( _a ):
a : Optional[int] ="""timm_backbone"""
def __init__( self , snake_case_=None , snake_case_=3 , snake_case_=True , snake_case_=True , snake_case_=None , **snake_case_ , ) -> Dict:
super().__init__(**snake_case_ )
UpperCamelCase__ = backbone
UpperCamelCase__ = num_channels
UpperCamelCase__ = features_only
UpperCamelCase__ = use_pretrained_backbone
UpperCamelCase__ = True
UpperCamelCase__ = out_indices if out_indices is not None else (-1,)
| 20 | 1 |
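# The TimmBackboneConfig above is a thin bridge config: it records which timm
# model to instantiate and which feature stages to expose to transformers.
# Usage sketch with the intended (unmasked) attribute names; "resnet50" is
# just an example timm model id.
config = TimmBackboneConfig(
    backbone="resnet50",
    num_channels=3,
    features_only=True,
    use_pretrained_backbone=False,
    out_indices=(1, 2, 3, 4),
)
print(config.backbone, config.out_indices)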
"""simple docstring"""
from __future__ import annotations
A__ : Tuple= {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
class __lowerCamelCase :
def __init__( self , snake_case_ , snake_case_ ) -> None:
UpperCamelCase__ = graph
# mapping node to its parent in resulting breadth first tree
UpperCamelCase__ = {}
UpperCamelCase__ = source_vertex
def SCREAMING_SNAKE_CASE__ ( self ) -> None:
UpperCamelCase__ = {self.source_vertex}
UpperCamelCase__ = None
UpperCamelCase__ = [self.source_vertex] # first in first out queue
while queue:
UpperCamelCase__ = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(snake_case_ )
UpperCamelCase__ = vertex
queue.append(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> str:
if target_vertex == self.source_vertex:
return self.source_vertex
UpperCamelCase__ = self.parent.get(snake_case_ )
if target_vertex_parent is None:
UpperCamelCase__ = (
F'No path from vertex: {self.source_vertex} to vertex: {target_vertex}'
)
raise ValueError(snake_case_ )
return self.shortest_path(snake_case_ ) + F'->{target_vertex}'
if __name__ == "__main__":
A__ : Optional[int]= Graph(graph, """G""")
g.breath_first_search()
print(g.shortest_path("""D"""))
print(g.shortest_path("""G"""))
print(g.shortest_path("""Foo"""))
| 20 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
A__ : Any= logging.get_logger(__name__)
A__ : str= {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class __lowerCamelCase ( _a ):
a : List[str] ="""layoutlmv3"""
def __init__( self , snake_case_=5_0265 , snake_case_=768 , snake_case_=12 , snake_case_=12 , snake_case_=3072 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=2 , snake_case_=0.02 , snake_case_=1E-5 , snake_case_=1 , snake_case_=0 , snake_case_=2 , snake_case_=1024 , snake_case_=128 , snake_case_=128 , snake_case_=True , snake_case_=32 , snake_case_=128 , snake_case_=64 , snake_case_=256 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=224 , snake_case_=3 , snake_case_=16 , snake_case_=None , **snake_case_ , ) -> Union[str, Any]:
super().__init__(
vocab_size=snake_case_ , hidden_size=snake_case_ , num_hidden_layers=snake_case_ , num_attention_heads=snake_case_ , intermediate_size=snake_case_ , hidden_act=snake_case_ , hidden_dropout_prob=snake_case_ , attention_probs_dropout_prob=snake_case_ , max_position_embeddings=snake_case_ , type_vocab_size=snake_case_ , initializer_range=snake_case_ , layer_norm_eps=snake_case_ , pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_ , )
UpperCamelCase__ = max_ad_position_embeddings
UpperCamelCase__ = coordinate_size
UpperCamelCase__ = shape_size
UpperCamelCase__ = has_relative_attention_bias
UpperCamelCase__ = rel_pos_bins
UpperCamelCase__ = max_rel_pos
UpperCamelCase__ = has_spatial_attention_bias
UpperCamelCase__ = rel_ad_pos_bins
UpperCamelCase__ = max_rel_ad_pos
UpperCamelCase__ = text_embed
UpperCamelCase__ = visual_embed
UpperCamelCase__ = input_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = patch_size
UpperCamelCase__ = classifier_dropout
class __lowerCamelCase ( _a ):
a : Tuple =version.parse("""1.12""" )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Mapping[str, Mapping[int, str]]:
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
else:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels'}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> float:
return 1E-5
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
return 12
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = -1 , snake_case_ = -1 , snake_case_ = False , snake_case_ = None , snake_case_ = 3 , snake_case_ = 40 , snake_case_ = 40 , ) -> Mapping[str, Any]:
setattr(processor.image_processor , 'apply_ocr' , snake_case_ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCamelCase__ = compute_effective_axis_dimension(
snake_case_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCamelCase__ = processor.tokenizer.num_special_tokens_to_add(snake_case_ )
UpperCamelCase__ = compute_effective_axis_dimension(
snake_case_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=snake_case_ )
# Generate dummy inputs according to compute batch and sequence
UpperCamelCase__ = [[' '.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
UpperCamelCase__ = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
UpperCamelCase__ = self._generate_dummy_images(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
UpperCamelCase__ = dict(
processor(
snake_case_ , text=snake_case_ , boxes=snake_case_ , return_tensors=snake_case_ , ) )
return inputs
| 20 | 1 |
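# The LayoutLMv3OnnxConfig above fabricates tracer inputs for ONNX export:
# unk-token text, one dummy bounding box per sample, and blank generated
# images. Usage sketch; the checkpoint is an example and LayoutLMv3OnnxConfig
# is assumed importable from its configuration module.
from transformers import AutoProcessor, LayoutLMv3Config

processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
onnx_config = LayoutLMv3OnnxConfig(LayoutLMv3Config(), task="question-answering")
dummy = onnx_config.generate_dummy_inputs(processor, framework=None)
print(sorted(dummy))  # attention_mask, bbox, input_ids, pixel_values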
"""simple docstring"""
import math
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> list:
"""simple docstring"""
UpperCamelCase__ = [True] * n
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = True
for i in range(3 , int(n**0.5 + 1 ) , 2 ):
UpperCamelCase__ = i * 2
while index < n:
UpperCamelCase__ = False
UpperCamelCase__ = index + i
UpperCamelCase__ = [2]
for i in range(3 , SCREAMING_SNAKE_CASE , 2 ):
if is_prime[i]:
primes.append(SCREAMING_SNAKE_CASE )
return primes
def lowerCAmelCase_( SCREAMING_SNAKE_CASE = 99_99_66_66_33_33 ) -> int:
"""simple docstring"""
UpperCamelCase__ = math.floor(math.sqrt(SCREAMING_SNAKE_CASE ) ) + 1_00
UpperCamelCase__ = prime_sieve(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = 0
UpperCamelCase__ = 0
UpperCamelCase__ = primes[prime_index]
while (last_prime**2) <= limit:
UpperCamelCase__ = primes[prime_index + 1]
UpperCamelCase__ = last_prime**2
UpperCamelCase__ = next_prime**2
# Get numbers divisible by lps(current)
UpperCamelCase__ = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
UpperCamelCase__ = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
UpperCamelCase__ = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
UpperCamelCase__ = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
| 20 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class __lowerCamelCase :
def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=2 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=3 , snake_case_=4 , snake_case_=None , ) -> Tuple:
UpperCamelCase__ = parent
UpperCamelCase__ = 13
UpperCamelCase__ = 7
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = 99
UpperCamelCase__ = 384
UpperCamelCase__ = 2
UpperCamelCase__ = 4
UpperCamelCase__ = 37
UpperCamelCase__ = 'gelu'
UpperCamelCase__ = 0.1
UpperCamelCase__ = 0.1
UpperCamelCase__ = 512
UpperCamelCase__ = 16
UpperCamelCase__ = 2
UpperCamelCase__ = 0.02
UpperCamelCase__ = 3
UpperCamelCase__ = 4
UpperCamelCase__ = 128
UpperCamelCase__ = 2
UpperCamelCase__ = 9
UpperCamelCase__ = 1
UpperCamelCase__ = None
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ = None
if self.use_input_mask:
UpperCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ = None
if self.use_token_type_ids:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase__ = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=snake_case_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int:
UpperCamelCase__ = TFConvBertModel(config=snake_case_ )
UpperCamelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCamelCase__ = [input_ids, input_mask]
UpperCamelCase__ = model(snake_case_ )
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int:
UpperCamelCase__ = TFConvBertForMaskedLM(config=snake_case_ )
UpperCamelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[str]:
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = TFConvBertForSequenceClassification(config=snake_case_ )
UpperCamelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Tuple:
UpperCamelCase__ = self.num_choices
UpperCamelCase__ = TFConvBertForMultipleChoice(config=snake_case_ )
UpperCamelCase__ = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[str]:
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = TFConvBertForTokenClassification(config=snake_case_ )
UpperCamelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[Any]:
UpperCamelCase__ = TFConvBertForQuestionAnswering(config=snake_case_ )
UpperCamelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class __lowerCamelCase ( _a , _a , unittest.TestCase ):
a : Any =(
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
a : str =(
{
"""feature-extraction""": TFConvBertModel,
"""fill-mask""": TFConvBertForMaskedLM,
"""question-answering""": TFConvBertForQuestionAnswering,
"""text-classification""": TFConvBertForSequenceClassification,
"""token-classification""": TFConvBertForTokenClassification,
"""zero-shot""": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
a : Any =False
a : Dict =False
a : str =False
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = TFConvBertModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=snake_case_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = True
UpperCamelCase__ = True
if hasattr(snake_case_ , 'use_cache' ):
UpperCamelCase__ = True
UpperCamelCase__ = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
UpperCamelCase__ = getattr(self.model_tester , 'key_length' , snake_case_ )
for model_class in self.all_model_classes:
UpperCamelCase__ = self._prepare_for_class(snake_case_ , snake_case_ )
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = len(model(snake_case_ ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case_ , saved_model=snake_case_ )
UpperCamelCase__ = os.path.join(snake_case_ , 'saved_model' , '1' )
UpperCamelCase__ = tf.keras.models.load_model(snake_case_ )
UpperCamelCase__ = model(snake_case_ )
if self.is_encoder_decoder:
UpperCamelCase__ = outputs['encoder_hidden_states']
UpperCamelCase__ = outputs['encoder_attentions']
else:
UpperCamelCase__ = outputs['hidden_states']
UpperCamelCase__ = outputs['attentions']
self.assertEqual(len(snake_case_ ) , snake_case_ )
UpperCamelCase__ = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(snake_case_ ) , snake_case_ )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = True
UpperCamelCase__ = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
UpperCamelCase__ = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
UpperCamelCase__ = getattr(self.model_tester , 'key_length' , snake_case_ )
UpperCamelCase__ = getattr(self.model_tester , 'key_length' , snake_case_ )
def check_decoder_attentions_output(snake_case_ ):
UpperCamelCase__ = len(snake_case_ )
self.assertEqual(out_len % 2 , 0 )
UpperCamelCase__ = outputs.decoder_attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(snake_case_ ):
UpperCamelCase__ = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
UpperCamelCase__ = True
UpperCamelCase__ = False
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
UpperCamelCase__ = len(snake_case_ )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
if self.is_encoder_decoder:
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_decoder_attentions_output(snake_case_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
UpperCamelCase__ = True
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
# Check attention is always last and order is fine
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(snake_case_ ) )
self.assertEqual(model.config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
@require_tf
class __lowerCamelCase ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
UpperCamelCase__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCamelCase__ = model(snake_case_ )[0]
UpperCamelCase__ = [1, 6, 768]
self.assertEqual(output.shape , snake_case_ )
UpperCamelCase__ = tf.constant(
[
[
[-0.03_475_493, -0.4_686_034, -0.30_638_832],
[0.22_637_248, -0.26_988_646, -0.7_423_424],
[0.10_324_868, -0.45_013_508, -0.58_280_784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , snake_case_ , atol=1E-4 )
| 20 | 1 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A__ : int= logging.get_logger(__name__)
A__ : List[Any]= {
"""ut/deta""": """https://huggingface.co/ut/deta/resolve/main/config.json""",
}
class __lowerCamelCase ( _a ):
a : Union[str, Any] ="""deta"""
a : Union[str, Any] ={
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , snake_case_=None , snake_case_=900 , snake_case_=2048 , snake_case_=6 , snake_case_=2048 , snake_case_=8 , snake_case_=6 , snake_case_=1024 , snake_case_=8 , snake_case_=0.0 , snake_case_=True , snake_case_="relu" , snake_case_=256 , snake_case_=0.1 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.02 , snake_case_=1.0 , snake_case_=True , snake_case_=False , snake_case_="sine" , snake_case_=5 , snake_case_=4 , snake_case_=4 , snake_case_=True , snake_case_=300 , snake_case_=True , snake_case_=True , snake_case_=1 , snake_case_=5 , snake_case_=2 , snake_case_=1 , snake_case_=1 , snake_case_=5 , snake_case_=2 , snake_case_=0.1 , snake_case_=0.25 , **snake_case_ , ) -> List[str]:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
UpperCamelCase__ = CONFIG_MAPPING['resnet'](out_features=['stage2', 'stage3', 'stage4'] )
else:
if isinstance(snake_case_ , snake_case_ ):
UpperCamelCase__ = backbone_config.pop('model_type' )
UpperCamelCase__ = CONFIG_MAPPING[backbone_model_type]
UpperCamelCase__ = config_class.from_dict(snake_case_ )
UpperCamelCase__ = backbone_config
UpperCamelCase__ = num_queries
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = d_model
UpperCamelCase__ = encoder_ffn_dim
UpperCamelCase__ = encoder_layers
UpperCamelCase__ = encoder_attention_heads
UpperCamelCase__ = decoder_ffn_dim
UpperCamelCase__ = decoder_layers
UpperCamelCase__ = decoder_attention_heads
UpperCamelCase__ = dropout
UpperCamelCase__ = attention_dropout
UpperCamelCase__ = activation_dropout
UpperCamelCase__ = activation_function
UpperCamelCase__ = init_std
UpperCamelCase__ = init_xavier_std
UpperCamelCase__ = encoder_layerdrop
UpperCamelCase__ = auxiliary_loss
UpperCamelCase__ = position_embedding_type
# deformable attributes
UpperCamelCase__ = num_feature_levels
UpperCamelCase__ = encoder_n_points
UpperCamelCase__ = decoder_n_points
UpperCamelCase__ = two_stage
UpperCamelCase__ = two_stage_num_proposals
UpperCamelCase__ = with_box_refine
UpperCamelCase__ = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError('If two_stage is True, with_box_refine must be True.' )
# Hungarian matcher
UpperCamelCase__ = class_cost
UpperCamelCase__ = bbox_cost
UpperCamelCase__ = giou_cost
# Loss coefficients
UpperCamelCase__ = mask_loss_coefficient
UpperCamelCase__ = dice_loss_coefficient
UpperCamelCase__ = bbox_loss_coefficient
UpperCamelCase__ = giou_loss_coefficient
UpperCamelCase__ = eos_coefficient
UpperCamelCase__ = focal_alpha
super().__init__(is_encoder_decoder=snake_case_ , **snake_case_ )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
return self.encoder_attention_heads
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
return self.d_model
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
UpperCamelCase__ = copy.deepcopy(self.__dict__ )
UpperCamelCase__ = self.backbone_config.to_dict()
UpperCamelCase__ = self.__class__.model_type
return output
| 20 |
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
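# Project Euler problem 174: a hollow square lamina with outer side `outer_width`
# and hole side `hole_width` (same parity, hence the step of 2 below) uses
# outer_width**2 - hole_width**2 tiles. For each tile total t we count how many
# distinct laminae it can form, then tally the t values yielding between 1 and
# n_limit arrangements.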
def solution( t_limit = 1_00_00_00 , n_limit = 10 ) -> int:
    """Count tile totals t <= t_limit that form between 1 and n_limit hollow square laminae."""
    count = defaultdict(int )
    for outer_width in range(3 , (t_limit // 4) + 2 ):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound , outer_width - 1 , 2 ):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 20 | 1 |
"""simple docstring"""
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
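# Conversion script: load a fairseq NLLB-MoE checkpoint (one file per expert rank
# plus a shared file), rename its keys to the Hugging Face layout, and re-save it
# as sharded weight files with an accompanying weight-map index.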
def remove_ignore_keys_( state_dict ) -> None:
    """Drop fairseq bookkeeping keys that have no Hugging Face equivalent."""
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        'decoder.output_projection.weight',
        '_float_tensor',
        'encoder.embed_positions._float_tensor',
        'decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb( emb ) -> nn.Linear:
    """Build a linear layer whose weights are tied to the given embedding."""
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys( state_dict , expert_idx=None ) -> dict:
    """Map fairseq NLLB-MoE key names onto the Hugging Face naming scheme."""
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace('moe_layer.experts.0' , F'ffn.experts.expert_{expert_idx}' )
            else:
                key = key.replace('moe_layer.experts.' , 'ffn.experts.expert_' )
        if "gate" in key:
            key = key.replace('.moe_layer.gate.wg' , '.ffn.router.classifier' )
        if "fc2" in key and "experts" not in key:
            key = key.replace('.fc2.' , '.ffn.fc2.' )
        if "fc1" in key and "experts" not in key:
            key = key.replace('.fc1.' , '.ffn.fc1.' )
        if ".encoder_attn." in key:
            key = key.replace('.encoder_attn.' , '.cross_attention.' )
        if "encoder_attn_layer_norm" in key:
            key = key.replace('encoder_attn_layer_norm' , 'cross_attention_layer_norm' )
        if "final_layer_norm" in key:
            key = key.replace('final_layer_norm' , 'ff_layer_norm' )
        new_dict[key] = state_dict[old_key]
    return new_dict
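# Sharding happens on the fly so the full mixture-of-experts model never has to be
# materialized in memory: each expert's state dict is renamed and written out as
# soon as it is loaded, and the shared (non-expert) weights become the final shard.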
def shard_on_the_fly( switch_checkpoint_path , dump_path , num_experts , dtype , weights_name = WEIGHTS_NAME ):
    """Re-save the checkpoint as one shard per expert plus a shared shard, and build the weight-map index."""
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path , exist_ok=True )
    for expert in range(num_experts ):
        expert_path = switch_checkpoint_path + F'-rank-{expert}.pt'
        if os.path.isfile(expert_path ):
            expert_state = torch.load(expert_path )['model']
            remove_ignore_keys_(expert_state )
            expert_state = rename_fairseq_keys(expert_state , expert )
            save_path = os.path.join(
                dump_path , weights_name.replace('.bin' , F'-{len(sharded_state_dicts )+1:05d}-of-???.bin' ) )
            torch.save(expert_state , save_path )
            sharded_state_dicts.append(expert_state.keys() )
            total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
                expert_state[list(expert_state )[0]].dtype )
    # Add the last block
    save_path = os.path.join(dump_path , weights_name.replace('.bin' , F'-{len(sharded_state_dicts )+1:05d}-of-???.bin' ) )
    shared_weights = torch.load(switch_checkpoint_path + '-shared.pt' )['model']
    remove_ignore_keys_(shared_weights )
    shared_weights = rename_fairseq_keys(shared_weights , None )
    shared_weights['shared.weight'] = shared_weights['decoder.embed_tokens.weight']
    sharded_state_dicts.append(shared_weights.keys() )
    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts ) == 1:
        save_path = os.path.join(dump_path , weights_name )
        torch.save(shared_weights , save_path )
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights , save_path )
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts ):
        shard_file = weights_name.replace('.bin' , F'-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin' )
        temp_filename = os.path.join(dump_path , weights_name.replace('.bin' , F'-{idx+1:05d}-of-???.bin' ) )
        os.rename(temp_filename , os.path.join(dump_path , shard_file ) )
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {'total_size': total_size}
    index = {'metadata': metadata, 'weight_map': weight_map}
    with open(os.path.join(dump_path , WEIGHTS_INDEX_NAME ) , 'w' , encoding='utf-8' ) as f:
        content = json.dumps(index , indent=2 , sort_keys=True ) + '\n'
        f.write(content )
    return metadata, index
if __name__ == "__main__":
A__ : int= argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--nllb_moe_checkpoint_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
A__ : Tuple= parser.parse_args()
A__, A__ : List[str]= shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
1_28,
args.dtype,
)
A__ : int= NllbMoeConfig.from_pretrained(
"""facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_28
)
config.save_pretrained(args.pytorch_dump_folder_path)
A__ : Dict= NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("""Done""")
model.save_pretrained(args.pytorch_dump_folder_path)
| 20 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class __lowerCamelCase ( unittest.TestCase ):
def __init__( self , snake_case_ , snake_case_=100 , snake_case_=13 , snake_case_=30 , snake_case_=2 , snake_case_=3 , snake_case_=True , snake_case_=True , snake_case_=32 , snake_case_=5 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=10 , snake_case_=0.02 , snake_case_=3 , ) -> Optional[int]:
UpperCamelCase__ = parent
UpperCamelCase__ = vocab_size
UpperCamelCase__ = batch_size
UpperCamelCase__ = image_size
UpperCamelCase__ = patch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = is_training
UpperCamelCase__ = use_labels
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = type_sequence_label_size
UpperCamelCase__ = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase__ = (image_size // patch_size) ** 2
UpperCamelCase__ = num_patches + 1
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ = BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case_ , initializer_range=self.initializer_range , )
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> List[Any]:
UpperCamelCase__ = FlaxBeitModel(config=snake_case_ )
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> List[Any]:
UpperCamelCase__ = FlaxBeitForMaskedImageModeling(config=snake_case_ )
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> List[str]:
UpperCamelCase__ = self.type_sequence_label_size
UpperCamelCase__ = FlaxBeitForImageClassification(config=snake_case_ )
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase__ = 1
UpperCamelCase__ = FlaxBeitForImageClassification(snake_case_ )
UpperCamelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase__ = model(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.prepare_config_and_inputs()
        UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = config_and_inputs
UpperCamelCase__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class __lowerCamelCase ( _a , unittest.TestCase ):
a : int =(
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
def SCREAMING_SNAKE_CASE__ ( self ) -> None:
UpperCamelCase__ = FlaxBeitModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ = [*signature.parameters.keys()]
UpperCamelCase__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCamelCase__ = self._prepare_for_class(snake_case_ , snake_case_ )
UpperCamelCase__ = model_class(snake_case_ )
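                # Run the model twice, traced and eager, to verify JIT compilation
                # does not change the outputs.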
@jax.jit
def model_jitted(snake_case_ , **snake_case_ ):
return model(pixel_values=snake_case_ , **snake_case_ )
with self.subTest('JIT Enabled' ):
UpperCamelCase__ = model_jitted(**snake_case_ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
UpperCamelCase__ = model_jitted(**snake_case_ ).to_tuple()
self.assertEqual(len(snake_case_ ) , len(snake_case_ ) )
for jitted_output, output in zip(snake_case_ , snake_case_ ):
self.assertEqual(jitted_output.shape , output.shape )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
for model_class_name in self.all_model_classes:
UpperCamelCase__ = model_class_name.from_pretrained('microsoft/beit-base-patch16-224' )
UpperCamelCase__ = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(snake_case_ )
def lowerCAmelCase_( ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = FlaxBeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=snake_case_ , return_tensors='np' ).pixel_values
# prepare bool_masked_pos
UpperCamelCase__ = np.ones((1, 196) , dtype=snake_case_ )
# forward pass
UpperCamelCase__ = model(pixel_values=snake_case_ , bool_masked_pos=snake_case_ )
UpperCamelCase__ = outputs.logits
# verify the logits
UpperCamelCase__ = (1, 196, 8192)
self.assertEqual(logits.shape , snake_case_ )
UpperCamelCase__ = np.array(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , snake_case_ , atol=1E-2 ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=snake_case_ , return_tensors='np' )
# forward pass
UpperCamelCase__ = model(**snake_case_ )
UpperCamelCase__ = outputs.logits
# verify the logits
UpperCamelCase__ = (1, 1000)
self.assertEqual(logits.shape , snake_case_ )
UpperCamelCase__ = np.array([-1.2_385, -1.0_987, -1.0_108] )
self.assertTrue(np.allclose(logits[0, :3] , snake_case_ , atol=1E-4 ) )
UpperCamelCase__ = 281
self.assertEqual(logits.argmax(-1 ).item() , snake_case_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=snake_case_ , return_tensors='np' )
# forward pass
UpperCamelCase__ = model(**snake_case_ )
UpperCamelCase__ = outputs.logits
# verify the logits
UpperCamelCase__ = (1, 2_1841)
self.assertEqual(logits.shape , snake_case_ )
UpperCamelCase__ = np.array([1.6_881, -0.2_787, 0.5_901] )
self.assertTrue(np.allclose(logits[0, :3] , snake_case_ , atol=1E-4 ) )
UpperCamelCase__ = 2396
self.assertEqual(logits.argmax(-1 ).item() , snake_case_ )
| 20 | 1 |
"""simple docstring"""
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
A__ : str= sys.version_info >= (3, 10)
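# `list_field` wraps `dataclasses.field` with a `default_factory` so that mutable
# list defaults can be used safely in the dataclass definitions below.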
def list_field( default=None , metadata=None ):
    """Wrap a mutable default in a dataclasses field via default_factory."""
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class __lowerCamelCase :
a : int
a : float
a : str
a : bool
@dataclass
class __lowerCamelCase :
a : int =4_2
a : str =field(default="""toto""" , metadata={"""help""": """help message"""} )
@dataclass
class __lowerCamelCase :
a : bool =False
a : bool =True
a : Optional[bool] =None
class __lowerCamelCase ( _a ):
a : Dict ="""titi"""
a : Optional[int] ="""toto"""
class __lowerCamelCase ( _a ):
a : Optional[Any] ="""titi"""
a : List[Any] ="""toto"""
a : Tuple =4_2
@dataclass
class __lowerCamelCase :
a : BasicEnum ="toto"
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
UpperCamelCase__ = BasicEnum(self.foo )
@dataclass
class __lowerCamelCase :
a : MixedTypeEnum ="toto"
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ = MixedTypeEnum(self.foo )
@dataclass
class __lowerCamelCase :
a : Optional[int] =None
a : Optional[float] =field(default=_a , metadata={"""help""": """help message"""} )
a : Optional[str] =None
a : Optional[List[str]] =list_field(default=[] )
a : Optional[List[int]] =list_field(default=[] )
@dataclass
class __lowerCamelCase :
a : List[int] =list_field(default=[] )
a : List[int] =list_field(default=[1, 2, 3] )
a : List[str] =list_field(default=["""Hallo""", """Bonjour""", """Hello"""] )
a : List[float] =list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class __lowerCamelCase :
a : List[int] =field()
a : str =field()
a : BasicEnum =field()
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = BasicEnum(self.required_enum )
@dataclass
class __lowerCamelCase :
a : int
a : "BasicEnum" =field()
a : "Optional[bool]" =None
a : "str" =field(default="""toto""" , metadata={"""help""": """help message"""} )
a : "List[str]" =list_field(default=["""Hallo""", """Bonjour""", """Hello"""] )
if is_python_no_less_than_3_10:
@dataclass
class __lowerCamelCase :
a : bool =False
a : bool =True
a : bool | None =None
@dataclass
class __lowerCamelCase :
a : int | None =None
a : float | None =field(default=_a , metadata={"""help""": """help message"""} )
a : str | None =None
a : list[str] | None =list_field(default=[] )
a : list[int] | None =list_field(default=[] )
class __lowerCamelCase ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> Union[str, Any]:
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
UpperCamelCase__ = {k: v for k, v in vars(snake_case_ ).items() if k != 'container'}
UpperCamelCase__ = {k: v for k, v in vars(snake_case_ ).items() if k != 'container'}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('choices' , snake_case_ ) and yy.get('choices' , snake_case_ ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['type'](snake_case_ ) , yy['type'](snake_case_ ) )
del xx["type"], yy["type"]
self.assertEqual(snake_case_ , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = HfArgumentParser(snake_case_ )
UpperCamelCase__ = argparse.ArgumentParser()
expected.add_argument('--foo' , type=snake_case_ , required=snake_case_ )
expected.add_argument('--bar' , type=snake_case_ , required=snake_case_ )
expected.add_argument('--baz' , type=snake_case_ , required=snake_case_ )
expected.add_argument('--flag' , type=snake_case_ , default=snake_case_ , const=snake_case_ , nargs='?' )
self.argparsersEqual(snake_case_ , snake_case_ )
UpperCamelCase__ = ['--foo', '1', '--baz', 'quux', '--bar', '0.5']
((UpperCamelCase__) , ) = parser.parse_args_into_dataclasses(snake_case_ , look_for_args_file=snake_case_ )
self.assertFalse(example.flag )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = HfArgumentParser(snake_case_ )
UpperCamelCase__ = argparse.ArgumentParser()
expected.add_argument('--foo' , default=42 , type=snake_case_ )
expected.add_argument('--baz' , default='toto' , type=snake_case_ , help='help message' )
self.argparsersEqual(snake_case_ , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
UpperCamelCase__ = argparse.ArgumentParser()
expected.add_argument('--foo' , type=snake_case_ , default=snake_case_ , const=snake_case_ , nargs='?' )
expected.add_argument('--baz' , type=snake_case_ , default=snake_case_ , const=snake_case_ , nargs='?' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('--no_baz' , action='store_false' , default=snake_case_ , dest='baz' )
expected.add_argument('--opt' , type=snake_case_ , default=snake_case_ )
UpperCamelCase__ = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(snake_case_ )
for dataclass_type in dataclass_types:
UpperCamelCase__ = HfArgumentParser(snake_case_ )
self.argparsersEqual(snake_case_ , snake_case_ )
UpperCamelCase__ = parser.parse_args([] )
self.assertEqual(snake_case_ , Namespace(foo=snake_case_ , baz=snake_case_ , opt=snake_case_ ) )
UpperCamelCase__ = parser.parse_args(['--foo', '--no_baz'] )
self.assertEqual(snake_case_ , Namespace(foo=snake_case_ , baz=snake_case_ , opt=snake_case_ ) )
UpperCamelCase__ = parser.parse_args(['--foo', '--baz'] )
self.assertEqual(snake_case_ , Namespace(foo=snake_case_ , baz=snake_case_ , opt=snake_case_ ) )
UpperCamelCase__ = parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'] )
self.assertEqual(snake_case_ , Namespace(foo=snake_case_ , baz=snake_case_ , opt=snake_case_ ) )
UpperCamelCase__ = parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'] )
self.assertEqual(snake_case_ , Namespace(foo=snake_case_ , baz=snake_case_ , opt=snake_case_ ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
UpperCamelCase__ = HfArgumentParser(snake_case_ )
UpperCamelCase__ = argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=['titi', 'toto', 42] , type=make_choice_type_function(['titi', 'toto', 42] ) , )
self.argparsersEqual(snake_case_ , snake_case_ )
UpperCamelCase__ = parser.parse_args([] )
self.assertEqual(args.foo , 'toto' )
UpperCamelCase__ = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
UpperCamelCase__ = parser.parse_args(['--foo', 'titi'] )
self.assertEqual(args.foo , 'titi' )
UpperCamelCase__ = parser.parse_args_into_dataclasses(['--foo', 'titi'] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
UpperCamelCase__ = parser.parse_args(['--foo', '42'] )
self.assertEqual(args.foo , 42 )
UpperCamelCase__ = parser.parse_args_into_dataclasses(['--foo', '42'] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
@dataclass
class __lowerCamelCase :
a : Literal["titi", "toto", 4_2] ="toto"
UpperCamelCase__ = HfArgumentParser(snake_case_ )
UpperCamelCase__ = argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=('titi', 'toto', 42) , type=make_choice_type_function(['titi', 'toto', 42] ) , )
self.argparsersEqual(snake_case_ , snake_case_ )
UpperCamelCase__ = parser.parse_args([] )
self.assertEqual(args.foo , 'toto' )
UpperCamelCase__ = parser.parse_args(['--foo', 'titi'] )
self.assertEqual(args.foo , 'titi' )
UpperCamelCase__ = parser.parse_args(['--foo', '42'] )
self.assertEqual(args.foo , 42 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
UpperCamelCase__ = HfArgumentParser(snake_case_ )
UpperCamelCase__ = argparse.ArgumentParser()
expected.add_argument('--foo_int' , nargs='+' , default=[] , type=snake_case_ )
expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=snake_case_ )
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=snake_case_ )
expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=snake_case_ )
self.argparsersEqual(snake_case_ , snake_case_ )
UpperCamelCase__ = parser.parse_args([] )
self.assertEqual(
snake_case_ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3] ) , )
UpperCamelCase__ = parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split() )
self.assertEqual(snake_case_ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7] ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = argparse.ArgumentParser()
expected.add_argument('--foo' , default=snake_case_ , type=snake_case_ )
expected.add_argument('--bar' , default=snake_case_ , type=snake_case_ , help='help message' )
expected.add_argument('--baz' , default=snake_case_ , type=snake_case_ )
expected.add_argument('--ces' , nargs='+' , default=[] , type=snake_case_ )
expected.add_argument('--des' , nargs='+' , default=[] , type=snake_case_ )
UpperCamelCase__ = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(snake_case_ )
for dataclass_type in dataclass_types:
UpperCamelCase__ = HfArgumentParser(snake_case_ )
self.argparsersEqual(snake_case_ , snake_case_ )
UpperCamelCase__ = parser.parse_args([] )
self.assertEqual(snake_case_ , Namespace(foo=snake_case_ , bar=snake_case_ , baz=snake_case_ , ces=[] , des=[] ) )
UpperCamelCase__ = parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split() )
self.assertEqual(snake_case_ , Namespace(foo=12 , bar=3.14 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3] ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ = HfArgumentParser(snake_case_ )
UpperCamelCase__ = argparse.ArgumentParser()
expected.add_argument('--required_list' , nargs='+' , type=snake_case_ , required=snake_case_ )
expected.add_argument('--required_str' , type=snake_case_ , required=snake_case_ )
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=snake_case_ , )
self.argparsersEqual(snake_case_ , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = HfArgumentParser(snake_case_ )
UpperCamelCase__ = argparse.ArgumentParser()
expected.add_argument('--foo' , type=snake_case_ , required=snake_case_ )
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=snake_case_ , )
expected.add_argument('--opt' , type=snake_case_ , default=snake_case_ )
expected.add_argument('--baz' , default='toto' , type=snake_case_ , help='help message' )
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=snake_case_ )
self.argparsersEqual(snake_case_ , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = HfArgumentParser(snake_case_ )
UpperCamelCase__ = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
UpperCamelCase__ = parser.parse_dict(snake_case_ )[0]
UpperCamelCase__ = BasicExample(**snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = HfArgumentParser(snake_case_ )
UpperCamelCase__ = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
'extra': 42,
}
self.assertRaises(snake_case_ , parser.parse_dict , snake_case_ , allow_extra_keys=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
UpperCamelCase__ = HfArgumentParser(snake_case_ )
UpperCamelCase__ = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase__ = os.path.join(snake_case_ , 'temp_json' )
os.mkdir(snake_case_ )
with open(temp_local_path + '.json' , 'w+' ) as f:
json.dump(snake_case_ , snake_case_ )
UpperCamelCase__ = parser.parse_yaml_file(Path(temp_local_path + '.json' ) )[0]
UpperCamelCase__ = BasicExample(**snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = HfArgumentParser(snake_case_ )
UpperCamelCase__ = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase__ = os.path.join(snake_case_ , 'temp_yaml' )
os.mkdir(snake_case_ )
with open(temp_local_path + '.yaml' , 'w+' ) as f:
yaml.dump(snake_case_ , snake_case_ )
UpperCamelCase__ = parser.parse_yaml_file(Path(temp_local_path + '.yaml' ) )[0]
UpperCamelCase__ = BasicExample(**snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = HfArgumentParser(snake_case_ )
self.assertIsNotNone(snake_case_ )
| 20 |
"""simple docstring"""
import sys
from collections import defaultdict
class __lowerCamelCase :
def __init__( self ) -> Tuple:
UpperCamelCase__ = []
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> List[str]:
return self.node_position[vertex]
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> Optional[int]:
UpperCamelCase__ = pos
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[Any]:
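        # Sift the element at `start` down the heap until the min-heap property
        # holds for the subtree of size `size`.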
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
UpperCamelCase__ = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
UpperCamelCase__ = 2 * start + 1
else:
UpperCamelCase__ = 2 * start + 2
if heap[smallest_child] < heap[start]:
UpperCamelCase__ , UpperCamelCase__ = heap[smallest_child], positions[smallest_child]
UpperCamelCase__ , UpperCamelCase__ = (
heap[start],
positions[start],
)
UpperCamelCase__ , UpperCamelCase__ = temp, tempa
UpperCamelCase__ = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , snake_case_ )
self.top_to_bottom(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Any:
UpperCamelCase__ = position[index]
while index != 0:
UpperCamelCase__ = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
UpperCamelCase__ = heap[parent]
UpperCamelCase__ = position[parent]
self.set_position(position[parent] , snake_case_ )
else:
UpperCamelCase__ = val
UpperCamelCase__ = temp
self.set_position(snake_case_ , snake_case_ )
break
UpperCamelCase__ = parent
else:
UpperCamelCase__ = val
UpperCamelCase__ = temp
self.set_position(snake_case_ , 0 )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> Any:
UpperCamelCase__ = len(snake_case_ ) // 2 - 1
for i in range(snake_case_ , -1 , -1 ):
self.top_to_bottom(snake_case_ , snake_case_ , len(snake_case_ ) , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> List[Any]:
UpperCamelCase__ = positions[0]
UpperCamelCase__ = sys.maxsize
self.top_to_bottom(snake_case_ , 0 , len(snake_case_ ) , snake_case_ )
return temp
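# Prim's algorithm: grow a minimum spanning tree from vertex 0, using the min-heap
# above to repeatedly extract the unvisited vertex with the cheapest connecting edge.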
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ = Heap()
UpperCamelCase__ = [0] * len(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = [-1] * len(SCREAMING_SNAKE_CASE ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
UpperCamelCase__ = [] # Heap of Distance of vertices from their neighboring vertex
UpperCamelCase__ = []
for vertex in range(len(SCREAMING_SNAKE_CASE ) ):
distance_tv.append(sys.maxsize )
positions.append(SCREAMING_SNAKE_CASE )
heap.node_position.append(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = []
UpperCamelCase__ = 1
UpperCamelCase__ = sys.maxsize
for neighbor, distance in adjacency_list[0]:
UpperCamelCase__ = 0
UpperCamelCase__ = distance
heap.heapify(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for _ in range(1 , len(SCREAMING_SNAKE_CASE ) ):
UpperCamelCase__ = heap.delete_minimum(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
UpperCamelCase__ = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(SCREAMING_SNAKE_CASE )]
):
UpperCamelCase__ = distance
heap.bottom_to_top(
SCREAMING_SNAKE_CASE , heap.get_position(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase__ = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
A__ : Dict= int(input("""Enter number of edges: """).strip())
A__ : Dict= defaultdict(list)
for _ in range(edges_number):
A__ : Dict= [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 20 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
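# Lazy-import layout: the structure below only maps submodules to public names;
# _LazyModule defers the real imports until an attribute is accessed, so optional
# backends (tokenizers, torch, TF, Flax) are loaded only when actually used.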
A__ : Union[str, Any]= {
"""configuration_roberta""": ["""ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RobertaConfig""", """RobertaOnnxConfig"""],
"""tokenization_roberta""": ["""RobertaTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : str= ["""RobertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : str= [
"""ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaForCausalLM""",
"""RobertaForMaskedLM""",
"""RobertaForMultipleChoice""",
"""RobertaForQuestionAnswering""",
"""RobertaForSequenceClassification""",
"""RobertaForTokenClassification""",
"""RobertaModel""",
"""RobertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : int= [
"""TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaForCausalLM""",
"""TFRobertaForMaskedLM""",
"""TFRobertaForMultipleChoice""",
"""TFRobertaForQuestionAnswering""",
"""TFRobertaForSequenceClassification""",
"""TFRobertaForTokenClassification""",
"""TFRobertaMainLayer""",
"""TFRobertaModel""",
"""TFRobertaPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : List[Any]= [
"""FlaxRobertaForCausalLM""",
"""FlaxRobertaForMaskedLM""",
"""FlaxRobertaForMultipleChoice""",
"""FlaxRobertaForQuestionAnswering""",
"""FlaxRobertaForSequenceClassification""",
"""FlaxRobertaForTokenClassification""",
"""FlaxRobertaModel""",
"""FlaxRobertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
A__ : int= _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 20 |
"""simple docstring"""
from copy import deepcopy
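# Fenwick tree (binary indexed tree): point updates and prefix-sum queries in
# O(log n). `next_` and `prev` walk the implicit tree by adding or clearing the
# lowest set bit of an index.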
class __lowerCamelCase :
def __init__( self , snake_case_ = None , snake_case_ = None ) -> None:
if arr is None and size is not None:
UpperCamelCase__ = size
UpperCamelCase__ = [0] * size
elif arr is not None:
self.init(snake_case_ )
else:
raise ValueError('Either arr or size must be specified' )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> None:
UpperCamelCase__ = len(snake_case_ )
UpperCamelCase__ = deepcopy(snake_case_ )
for i in range(1 , self.size ):
UpperCamelCase__ = self.next_(snake_case_ )
if j < self.size:
self.tree[j] += self.tree[i]
def SCREAMING_SNAKE_CASE__ ( self ) -> list[int]:
UpperCamelCase__ = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
UpperCamelCase__ = self.next_(snake_case_ )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> int:
return index + (index & (-index))
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> int:
return index - (index & (-index))
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> None:
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
UpperCamelCase__ = self.next_(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> None:
self.add(snake_case_ , value - self.get(snake_case_ ) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> int:
if right == 0:
return 0
UpperCamelCase__ = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
UpperCamelCase__ = self.prev(snake_case_ )
return result
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> int:
return self.prefix(snake_case_ ) - self.prefix(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> int:
return self.query(snake_case_ , index + 1 )
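    # Binary-lifting descent: find the largest index i with prefix(i) <= value by
    # stepping down through powers of two, in O(log n).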
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> int:
value -= self.tree[0]
if value < 0:
return -1
UpperCamelCase__ = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
UpperCamelCase__ = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 | 1 |
"""simple docstring"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
UpperCamelCase__ = len(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = sum(SCREAMING_SNAKE_CASE )
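    # Classic subset-sum DP: dp[i][j] is True when some subset of the first i
    # elements sums to exactly j; the minimum partition difference is then the
    # smallest s - 2*j over achievable j <= s // 2.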
UpperCamelCase__ = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
UpperCamelCase__ = True
for i in range(1 , s + 1 ):
UpperCamelCase__ = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
UpperCamelCase__ = dp[i][j - 1]
if arr[i - 1] <= j:
UpperCamelCase__ = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
UpperCamelCase__ = s - 2 * j
break
return diff
| 20 |
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
A__ : Union[str, Any]= logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""" )
@require_torch
@require_tf
@slow
class __lowerCamelCase ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = True , ) -> Tuple:
UpperCamelCase__ = [file for file in os.listdir(snake_case_ ) if os.path.isfile(os.path.join(snake_case_ , snake_case_ ) )]
if identifier is not None:
UpperCamelCase__ = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(snake_case_ , snake_case_ ):
for n_ in n_identifier:
UpperCamelCase__ = [file for file in files if n_ not in file]
else:
UpperCamelCase__ = [file for file in files if n_identifier not in file]
UpperCamelCase__ = ignore_files or []
ignore_files.append('__init__.py' )
UpperCamelCase__ = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('Testing' , snake_case_ )
if only_modules:
UpperCamelCase__ = file.split('.' )[0]
try:
UpperCamelCase__ = getattr(snake_case_ , snake_case_ )
UpperCamelCase__ = doctest.DocTestSuite(snake_case_ )
UpperCamelCase__ = unittest.TextTestRunner().run(snake_case_ )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(F'{module_identifier} is not a module.' )
else:
UpperCamelCase__ = doctest.testfile(str('..' / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = Path('src/transformers' )
UpperCamelCase__ = 'modeling'
UpperCamelCase__ = [
'modeling_ctrl.py',
'modeling_tf_ctrl.py',
]
self.analyze_directory(snake_case_ , identifier=snake_case_ , ignore_files=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = Path('src/transformers' )
UpperCamelCase__ = 'tokenization'
self.analyze_directory(snake_case_ , identifier=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
UpperCamelCase__ = Path('src/transformers' )
UpperCamelCase__ = 'configuration'
self.analyze_directory(snake_case_ , identifier=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = Path('src/transformers' )
UpperCamelCase__ = ['configuration', 'modeling', 'tokenization']
self.analyze_directory(snake_case_ , n_identifier=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = Path('docs/source' )
UpperCamelCase__ = ['favicon.ico']
self.analyze_directory(snake_case_ , ignore_files=snake_case_ , only_modules=snake_case_ )
| 20 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ : Dict= logging.get_logger(__name__)
A__ : Tuple= {
"""microsoft/beit-base-patch16-224-pt22k""": (
"""https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"""
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class __lowerCamelCase ( _a ):
a : List[str] ="""beit"""
def __init__( self , snake_case_=8192 , snake_case_=768 , snake_case_=12 , snake_case_=12 , snake_case_=3072 , snake_case_="gelu" , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.02 , snake_case_=1E-12 , snake_case_=224 , snake_case_=16 , snake_case_=3 , snake_case_=False , snake_case_=False , snake_case_=False , snake_case_=False , snake_case_=0.1 , snake_case_=0.1 , snake_case_=True , snake_case_=[3, 5, 7, 11] , snake_case_=[1, 2, 3, 6] , snake_case_=True , snake_case_=0.4 , snake_case_=256 , snake_case_=1 , snake_case_=False , snake_case_=255 , **snake_case_ , ) -> Any:
super().__init__(**snake_case_ )
UpperCamelCase__ = vocab_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = initializer_range
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = image_size
UpperCamelCase__ = patch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = use_mask_token
UpperCamelCase__ = use_absolute_position_embeddings
UpperCamelCase__ = use_relative_position_bias
UpperCamelCase__ = use_shared_relative_position_bias
UpperCamelCase__ = layer_scale_init_value
UpperCamelCase__ = drop_path_rate
UpperCamelCase__ = use_mean_pooling
# decode head attributes (semantic segmentation)
UpperCamelCase__ = out_indices
UpperCamelCase__ = pool_scales
# auxiliary head attributes (semantic segmentation)
UpperCamelCase__ = use_auxiliary_head
UpperCamelCase__ = auxiliary_loss_weight
UpperCamelCase__ = auxiliary_channels
UpperCamelCase__ = auxiliary_num_convs
UpperCamelCase__ = auxiliary_concat_input
UpperCamelCase__ = semantic_loss_ignore_index
class __lowerCamelCase ( _a ):
a : Any =version.parse("""1.11""" )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> float:
return 1E-4
| 20 |
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ : str= logging.get_logger(__name__)
A__ : List[Any]= {
"""nvidia/segformer-b0-finetuned-ade-512-512""": (
"""https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"""
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class __lowerCamelCase ( _a ):
a : Any ="""segformer"""
def __init__( self , snake_case_=3 , snake_case_=4 , snake_case_=[2, 2, 2, 2] , snake_case_=[8, 4, 2, 1] , snake_case_=[32, 64, 160, 256] , snake_case_=[7, 3, 3, 3] , snake_case_=[4, 2, 2, 2] , snake_case_=[1, 2, 5, 8] , snake_case_=[4, 4, 4, 4] , snake_case_="gelu" , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.1 , snake_case_=0.02 , snake_case_=0.1 , snake_case_=1E-6 , snake_case_=256 , snake_case_=255 , **snake_case_ , ) -> Tuple:
super().__init__(**snake_case_ )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
' removed, as the behaviour will default to that of reshape_last_stage = True.' , snake_case_ , )
UpperCamelCase__ = num_channels
UpperCamelCase__ = num_encoder_blocks
UpperCamelCase__ = depths
UpperCamelCase__ = sr_ratios
UpperCamelCase__ = hidden_sizes
UpperCamelCase__ = patch_sizes
UpperCamelCase__ = strides
UpperCamelCase__ = mlp_ratios
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = classifier_dropout_prob
UpperCamelCase__ = initializer_range
UpperCamelCase__ = drop_path_rate
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = decoder_hidden_size
UpperCamelCase__ = kwargs.get('reshape_last_stage' , snake_case_ )
UpperCamelCase__ = semantic_loss_ignore_index
class __lowerCamelCase ( _a ):
a : Any =version.parse("""1.11""" )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> float:
return 1E-4
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
return 12
| 20 | 1 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class __lowerCamelCase ( unittest.TestCase ):
def __init__( self , snake_case_ , snake_case_=7 , snake_case_=3 , snake_case_=30 , snake_case_=400 , snake_case_=True , snake_case_=None , snake_case_=True , snake_case_=[0.5, 0.5, 0.5] , snake_case_=[0.5, 0.5, 0.5] , snake_case_=True , snake_case_=1 / 255 , snake_case_=True , ) -> Any:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
UpperCamelCase__ = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = min_resolution
UpperCamelCase__ = max_resolution
UpperCamelCase__ = do_resize
UpperCamelCase__ = size
UpperCamelCase__ = do_normalize
UpperCamelCase__ = image_mean
UpperCamelCase__ = image_std
UpperCamelCase__ = do_rescale
UpperCamelCase__ = rescale_factor
UpperCamelCase__ = do_pad
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_=False ) -> Dict:
if not batched:
UpperCamelCase__ = image_inputs[0]
if isinstance(snake_case_ , Image.Image ):
UpperCamelCase__ , UpperCamelCase__ = image.size
else:
UpperCamelCase__ , UpperCamelCase__ = image.shape[1], image.shape[2]
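            # Mirror shortest-edge resizing: scale the image so its shorter side
            # equals size["shortest_edge"], preserving the aspect ratio.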
if w < h:
UpperCamelCase__ = int(self.size['shortest_edge'] * h / w )
UpperCamelCase__ = self.size['shortest_edge']
elif w > h:
UpperCamelCase__ = self.size['shortest_edge']
UpperCamelCase__ = int(self.size['shortest_edge'] * w / h )
else:
UpperCamelCase__ = self.size['shortest_edge']
UpperCamelCase__ = self.size['shortest_edge']
else:
UpperCamelCase__ = []
for image in image_inputs:
UpperCamelCase__ , UpperCamelCase__ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCamelCase__ = max(snake_case_ , key=lambda snake_case_ : item[0] )[0]
UpperCamelCase__ = max(snake_case_ , key=lambda snake_case_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class __lowerCamelCase ( _a , unittest.TestCase ):
a : Tuple =YolosImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = YolosImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case_ , 'image_mean' ) )
self.assertTrue(hasattr(snake_case_ , 'image_std' ) )
self.assertTrue(hasattr(snake_case_ , 'do_normalize' ) )
self.assertTrue(hasattr(snake_case_ , 'do_resize' ) )
self.assertTrue(hasattr(snake_case_ , 'size' ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad , snake_case_ )
UpperCamelCase__ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=snake_case_ )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
pass
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
# Initialize image_processing
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , Image.Image )
# Test not batched input
UpperCamelCase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ = self.image_processor_tester.get_expected_values(snake_case_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ , UpperCamelCase__ = self.image_processor_tester.get_expected_values(snake_case_ , batched=snake_case_ )
UpperCamelCase__ = image_processing(snake_case_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
# Initialize image_processing
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ , numpify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , np.ndarray )
# Test not batched input
UpperCamelCase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ = self.image_processor_tester.get_expected_values(snake_case_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ = image_processing(snake_case_ , return_tensors='pt' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ = self.image_processor_tester.get_expected_values(snake_case_ , batched=snake_case_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
# Initialize image_processing
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ , torchify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , torch.Tensor )
# Test not batched input
UpperCamelCase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ = self.image_processor_tester.get_expected_values(snake_case_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ = image_processing(snake_case_ , return_tensors='pt' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ = self.image_processor_tester.get_expected_values(snake_case_ , batched=snake_case_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
        # Initialize two image processors
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
UpperCamelCase__ = self.image_processing_class(do_resize=snake_case_ , do_normalize=snake_case_ , do_rescale=snake_case_ )
# create random PyTorch tensors
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ , torchify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
UpperCamelCase__ = image_processing_a.pad(snake_case_ , return_tensors='pt' )
UpperCamelCase__ = image_processing_a(snake_case_ , return_tensors='pt' )
self.assertTrue(
torch.allclose(encoded_images_with_method['pixel_values'] , encoded_images['pixel_values'] , atol=1E-4 ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
# prepare image and target
UpperCamelCase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
UpperCamelCase__ = json.loads(f.read() )
UpperCamelCase__ = {'image_id': 3_9769, 'annotations': target}
# encode them
UpperCamelCase__ = YolosImageProcessor.from_pretrained('hustvl/yolos-small' )
UpperCamelCase__ = image_processing(images=snake_case_ , annotations=snake_case_ , return_tensors='pt' )
# verify pixel values
UpperCamelCase__ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , snake_case_ )
UpperCamelCase__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , snake_case_ , atol=1E-4 ) )
# verify area
UpperCamelCase__ = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , snake_case_ ) )
# verify boxes
UpperCamelCase__ = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , snake_case_ )
UpperCamelCase__ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , snake_case_ , atol=1E-3 ) )
# verify image_id
UpperCamelCase__ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , snake_case_ ) )
# verify is_crowd
UpperCamelCase__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , snake_case_ ) )
# verify class_labels
UpperCamelCase__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , snake_case_ ) )
# verify orig_size
UpperCamelCase__ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , snake_case_ ) )
# verify size
UpperCamelCase__ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , snake_case_ ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
# prepare image, target and masks_path
UpperCamelCase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
UpperCamelCase__ = json.loads(f.read() )
UpperCamelCase__ = {'file_name': '000000039769.png', 'image_id': 3_9769, 'segments_info': target}
UpperCamelCase__ = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
UpperCamelCase__ = YolosImageProcessor(format='coco_panoptic' )
UpperCamelCase__ = image_processing(images=snake_case_ , annotations=snake_case_ , masks_path=snake_case_ , return_tensors='pt' )
# verify pixel values
UpperCamelCase__ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , snake_case_ )
UpperCamelCase__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , snake_case_ , atol=1E-4 ) )
# verify area
UpperCamelCase__ = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , snake_case_ ) )
# verify boxes
UpperCamelCase__ = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , snake_case_ )
UpperCamelCase__ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , snake_case_ , atol=1E-3 ) )
# verify image_id
UpperCamelCase__ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , snake_case_ ) )
# verify is_crowd
UpperCamelCase__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , snake_case_ ) )
# verify class_labels
UpperCamelCase__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , snake_case_ ) )
# verify masks
UpperCamelCase__ = 82_2873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , snake_case_ )
# verify orig_size
UpperCamelCase__ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , snake_case_ ) )
# verify size
UpperCamelCase__ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , snake_case_ ) )
| 20 |
"""simple docstring"""
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class __lowerCamelCase ( _a ):
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Union[str, Any]:
UpperCamelCase__ = parser.add_parser('download' )
download_parser.add_argument(
'--cache-dir' , type=snake_case_ , default=snake_case_ , help='Path to location to store the models' )
download_parser.add_argument(
'--force' , action='store_true' , help='Force the model to be download even if already in cache-dir' )
download_parser.add_argument(
'--trust-remote-code' , action='store_true' , help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' , )
download_parser.add_argument('model' , type=snake_case_ , help='Name of the model to download' )
download_parser.set_defaults(func=snake_case_ )
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> str:
UpperCamelCase__ = model
UpperCamelCase__ = cache
UpperCamelCase__ = force
UpperCamelCase__ = trust_remote_code
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
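    # Illustrative invocation (hypothetical paths; assumes the standard
    # `transformers-cli` entry point that registers this subcommand):
    #   transformers-cli download bert-base-uncased --cache-dir /tmp/hf-cache --force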
| 20 | 1 |
"""simple docstring"""
class __lowerCamelCase :
def __init__( self , snake_case_ , snake_case_=None , snake_case_=None ) -> Optional[Any]:
UpperCamelCase__ = data
UpperCamelCase__ = previous
UpperCamelCase__ = next_node
def __str__( self ) -> str:
return F'{self.data}'
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
return self.data
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
return self.next
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
return self.previous
class __lowerCamelCase :
def __init__( self , snake_case_ ) -> Union[str, Any]:
UpperCamelCase__ = head
def __iter__( self ) -> int:
return self
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
if not self.current:
raise StopIteration
else:
UpperCamelCase__ = self.current.get_data()
UpperCamelCase__ = self.current.get_next()
return value
class __lowerCamelCase :
def __init__( self ) -> str:
UpperCamelCase__ = None # First node in list
UpperCamelCase__ = None # Last node in list
def __str__( self ) -> List[Any]:
UpperCamelCase__ = self.head
UpperCamelCase__ = []
while current is not None:
nodes.append(current.get_data() )
UpperCamelCase__ = current.get_next()
return " ".join(str(snake_case_ ) for node in nodes )
def __contains__( self , snake_case_ ) -> Tuple:
UpperCamelCase__ = self.head
while current:
if current.get_data() == value:
return True
UpperCamelCase__ = current.get_next()
return False
def __iter__( self ) -> Dict:
return LinkedListIterator(self.head )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
if self.head:
return self.head.get_data()
return None
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
if self.tail:
return self.tail.get_data()
return None
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> None:
if self.head is None:
UpperCamelCase__ = node
UpperCamelCase__ = node
else:
self.insert_before_node(self.head , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> None:
if self.head is None:
self.set_head(snake_case_ )
else:
self.insert_after_node(self.tail , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> None:
UpperCamelCase__ = Node(snake_case_ )
if self.head is None:
self.set_head(snake_case_ )
else:
self.set_tail(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> None:
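        # splice node_to_insert immediately before `node`, rewiring the previous
        # neighbour's next pointer (or the list head when `node` was the head)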
UpperCamelCase__ = node
UpperCamelCase__ = node.previous
if node.get_previous() is None:
UpperCamelCase__ = node_to_insert
else:
UpperCamelCase__ = node_to_insert
UpperCamelCase__ = node_to_insert
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> None:
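        # splice node_to_insert immediately after `node`, rewiring the next
        # neighbour's previous pointer (or the list tail when `node` was the tail)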
UpperCamelCase__ = node
UpperCamelCase__ = node.next
if node.get_next() is None:
UpperCamelCase__ = node_to_insert
else:
UpperCamelCase__ = node_to_insert
UpperCamelCase__ = node_to_insert
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> None:
UpperCamelCase__ = 1
UpperCamelCase__ = Node(snake_case_ )
UpperCamelCase__ = self.head
while node:
if current_position == position:
self.insert_before_node(snake_case_ , snake_case_ )
return
current_position += 1
UpperCamelCase__ = node.next
self.insert_after_node(self.tail , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Node:
UpperCamelCase__ = self.head
while node:
if node.get_data() == item:
return node
UpperCamelCase__ = node.get_next()
raise Exception('Node not found' )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Dict:
if (node := self.get_node(snake_case_ )) is not None:
if node == self.head:
UpperCamelCase__ = self.head.get_next()
if node == self.tail:
UpperCamelCase__ = self.tail.get_previous()
self.remove_node_pointers(snake_case_ )
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> None:
if node.get_next():
UpperCamelCase__ = node.previous
if node.get_previous():
UpperCamelCase__ = node.next
UpperCamelCase__ = None
UpperCamelCase__ = None
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
return self.head is None
def lowerCAmelCase_( ) -> None:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
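# Minimal usage sketch (illustrative only: method names in this listing are
# mangled; in the original they would be insert, get_node, remove, is_empty, ...):
#   ll = LinkedList()
#   for value in (1, 2, 3):
#       ll.insert(value)  # appends at the tail
#   print(ll)             # -> "1 2 3"
#   assert 2 in ll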
| 20 |
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCamelCase ( _a ):
def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=5 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=False , snake_case_=True , snake_case_="None" , snake_case_=3 , snake_case_=4 , snake_case_=None , ) -> str:
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = seq_length
UpperCamelCase__ = is_training
UpperCamelCase__ = use_input_mask
UpperCamelCase__ = use_token_type_ids
UpperCamelCase__ = use_labels
UpperCamelCase__ = vocab_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = type_vocab_size
UpperCamelCase__ = type_sequence_label_size
UpperCamelCase__ = initializer_range
UpperCamelCase__ = num_labels
UpperCamelCase__ = num_choices
UpperCamelCase__ = relative_attention
UpperCamelCase__ = position_biased_input
UpperCamelCase__ = pos_att_type
UpperCamelCase__ = scope
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ = None
if self.use_input_mask:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
UpperCamelCase__ = None
if self.use_token_type_ids:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Any:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> str:
UpperCamelCase__ = DebertaVaModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ )[0]
UpperCamelCase__ = model(snake_case_ , token_type_ids=snake_case_ )[0]
UpperCamelCase__ = model(snake_case_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Optional[Any]:
UpperCamelCase__ = DebertaVaForMaskedLM(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Dict:
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = DebertaVaForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Optional[int]:
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = DebertaVaForTokenClassification(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Tuple:
UpperCamelCase__ = DebertaVaForQuestionAnswering(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int:
UpperCamelCase__ = DebertaVaForMultipleChoice(config=snake_case_ )
model.to(snake_case_ )
model.eval()
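        # multiple-choice models expect inputs of shape (batch, num_choices, seq_len),
        # so each tensor is duplicated across a new choice dimension below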
UpperCamelCase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = self.prepare_config_and_inputs()
(
(
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) ,
) = config_and_inputs
UpperCamelCase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( _a , _a , unittest.TestCase ):
a : Any =(
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
a : Dict =(
{
"""feature-extraction""": DebertaVaModel,
"""fill-mask""": DebertaVaForMaskedLM,
"""question-answering""": DebertaVaForQuestionAnswering,
"""text-classification""": DebertaVaForSequenceClassification,
"""token-classification""": DebertaVaForTokenClassification,
"""zero-shot""": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
a : Tuple =True
a : Union[str, Any] =False
a : Tuple =False
a : Union[str, Any] =False
a : Dict =False
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
UpperCamelCase__ = DebertaVaModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=snake_case_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*snake_case_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ = DebertaVaModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( unittest.TestCase ):
@unittest.skip(reason='Model not available yet' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
pass
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
UpperCamelCase__ = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge' )
UpperCamelCase__ = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
UpperCamelCase__ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ )[0]
# compare the actual values for a slice.
UpperCamelCase__ = torch.tensor(
[[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , snake_case_ , atol=1E-4 ) , F'{output[:, 1:4, 1:4]}' )
| 20 | 1 |
"""simple docstring"""
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ = torch.load(SCREAMING_SNAKE_CASE , map_location='cpu' )
UpperCamelCase__ = chkpt['model']
# We have the base model one level deeper than the original XLM repository
UpperCamelCase__ = {}
for k, v in state_dict.items():
if "pred_layer" in k:
UpperCamelCase__ = v
else:
UpperCamelCase__ = v
UpperCamelCase__ = chkpt['params']
UpperCamelCase__ = {n: v for n, v in config.items() if not isinstance(SCREAMING_SNAKE_CASE , (torch.FloatTensor, numpy.ndarray) )}
UpperCamelCase__ = chkpt['dico_word2id']
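    # XLM's fastBPE vocab marks word continuations with "@@"; strip those markers
    # and append "</w>" to word-final tokens (indices 0-13, presumably special
    # tokens, are left unchanged)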
UpperCamelCase__ = {s + '</w>' if s.find('@@' ) == -1 and i > 13 else s.replace('@@' , '' ): i for s, i in vocab.items()}
# Save pytorch-model
UpperCamelCase__ = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
UpperCamelCase__ = pytorch_dump_folder_path + '/' + CONFIG_NAME
UpperCamelCase__ = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['vocab_file']
print(F'Save PyTorch model to {pytorch_weights_dump_path}' )
torch.save(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
print(F'Save configuration file to {pytorch_config_dump_path}' )
with open(SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE , indent=2 ) + '\n' )
print(F'Save vocab file to {pytorch_config_dump_path}' )
with open(SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE , indent=2 ) + '\n' )
if __name__ == "__main__":
A__ : Optional[Any]= argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xlm_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
A__ : Optional[Any]= parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 20 |
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ = SwinConfig()
UpperCamelCase__ = swin_name.split('_' )
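    # timm names look like "swin_tiny_patch4_window7_224":
    # name_split[1] -> model size, name_split[3][-1] -> window size, name_split[4] -> image size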
UpperCamelCase__ = name_split[1]
UpperCamelCase__ = int(name_split[4] )
UpperCamelCase__ = int(name_split[3][-1] )
if model_size == "tiny":
UpperCamelCase__ = 96
UpperCamelCase__ = (2, 2, 6, 2)
UpperCamelCase__ = (3, 6, 12, 24)
elif model_size == "small":
UpperCamelCase__ = 96
UpperCamelCase__ = (2, 2, 18, 2)
UpperCamelCase__ = (3, 6, 12, 24)
elif model_size == "base":
UpperCamelCase__ = 1_28
UpperCamelCase__ = (2, 2, 18, 2)
UpperCamelCase__ = (4, 8, 16, 32)
else:
UpperCamelCase__ = 1_92
UpperCamelCase__ = (2, 2, 18, 2)
UpperCamelCase__ = (6, 12, 24, 48)
if "in22k" in swin_name:
UpperCamelCase__ = 2_18_41
else:
UpperCamelCase__ = 10_00
UpperCamelCase__ = 'huggingface/label-files'
UpperCamelCase__ = 'imagenet-1k-id2label.json'
UpperCamelCase__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
UpperCamelCase__ = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
UpperCamelCase__ = idalabel
UpperCamelCase__ = {v: k for k, v in idalabel.items()}
UpperCamelCase__ = img_size
UpperCamelCase__ = num_classes
UpperCamelCase__ = embed_dim
UpperCamelCase__ = depths
UpperCamelCase__ = num_heads
UpperCamelCase__ = window_size
return config
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
if "patch_embed.proj" in name:
UpperCamelCase__ = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
UpperCamelCase__ = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
UpperCamelCase__ = 'encoder.' + name
if "attn.proj" in name:
UpperCamelCase__ = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
UpperCamelCase__ = name.replace('attn' , 'attention.self' )
if "norm1" in name:
UpperCamelCase__ = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
UpperCamelCase__ = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
UpperCamelCase__ = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
UpperCamelCase__ = name.replace('mlp.fc2' , 'output.dense' )
if name == "norm.weight":
UpperCamelCase__ = 'layernorm.weight'
if name == "norm.bias":
UpperCamelCase__ = 'layernorm.bias'
if "head" in name:
UpperCamelCase__ = name.replace('head' , 'classifier' )
else:
UpperCamelCase__ = 'swin.' + name
return name
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
UpperCamelCase__ = orig_state_dict.pop(SCREAMING_SNAKE_CASE )
if "mask" in key:
continue
elif "qkv" in key:
UpperCamelCase__ = key.split('.' )
UpperCamelCase__ = int(key_split[1] )
UpperCamelCase__ = int(key_split[3] )
UpperCamelCase__ = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
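            # timm fuses query/key/value into a single qkv projection; slice it
            # into the separate q, k, v weights/biases expected by HF Swin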
if "weight" in key:
UpperCamelCase__ = val[:dim, :]
UpperCamelCase__ = val[
dim : dim * 2, :
]
UpperCamelCase__ = val[-dim:, :]
else:
UpperCamelCase__ = val[
:dim
]
UpperCamelCase__ = val[
dim : dim * 2
]
UpperCamelCase__ = val[
-dim:
]
else:
UpperCamelCase__ = val
return orig_state_dict
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
UpperCamelCase__ = timm.create_model(SCREAMING_SNAKE_CASE , pretrained=SCREAMING_SNAKE_CASE )
timm_model.eval()
UpperCamelCase__ = get_swin_config(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = SwinForImageClassification(SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase__ = convert_state_dict(timm_model.state_dict() , SCREAMING_SNAKE_CASE )
model.load_state_dict(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
UpperCamelCase__ = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-' ) ) )
UpperCamelCase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
UpperCamelCase__ = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='pt' )
UpperCamelCase__ = timm_model(inputs['pixel_values'] )
UpperCamelCase__ = model(**SCREAMING_SNAKE_CASE ).logits
assert torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-3 )
print(F'Saving model {swin_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
A__ : Optional[Any]= argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
A__ : Tuple= parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 20 | 1 |
"""simple docstring"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> bool:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = F'Input value of [number={number}] must be an integer'
raise TypeError(SCREAMING_SNAKE_CASE )
if number < 0:
return False
UpperCamelCase__ = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 |
"""simple docstring"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> bool:
"""simple docstring"""
UpperCamelCase__ = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def lowerCAmelCase_( SCREAMING_SNAKE_CASE = 50_00 ) -> int:
"""simple docstring"""
UpperCamelCase__ = [(i * (3 * i - 1)) // 2 for i in range(1 , SCREAMING_SNAKE_CASE )]
    for i, pentagonal_i in enumerate(pentagonal_nums ):
        for j in range(i , len(pentagonal_nums ) ):
            UpperCamelCase__ = pentagonal_nums[j]
            UpperCamelCase__ = pentagonal_i + pentagonal_j
            UpperCamelCase__ = pentagonal_j - pentagonal_i
            if is_pentagonal(a ) and is_pentagonal(b ):
return b
return -1
if __name__ == "__main__":
print(F"""{solution() = }""")
| 20 | 1 |
"""simple docstring"""
A__ : int= """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def lowerCAmelCase_( ) -> None:
"""simple docstring"""
UpperCamelCase__ = input('Enter message: ' )
UpperCamelCase__ = input('Enter key [alphanumeric]: ' )
UpperCamelCase__ = input('Encrypt/Decrypt [e/d]: ' )
if mode.lower().startswith('e' ):
UpperCamelCase__ = 'encrypt'
UpperCamelCase__ = encrypt_message(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif mode.lower().startswith('d' ):
UpperCamelCase__ = 'decrypt'
UpperCamelCase__ = decrypt_message(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
print(F'\n{mode.title()}ed message:' )
print(SCREAMING_SNAKE_CASE )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
return translate_message(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , 'encrypt' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
return translate_message(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , 'decrypt' )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
UpperCamelCase__ = []
UpperCamelCase__ = 0
UpperCamelCase__ = key.upper()
for symbol in message:
UpperCamelCase__ = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(SCREAMING_SNAKE_CASE )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = 0
else:
translated.append(SCREAMING_SNAKE_CASE )
return "".join(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 20 |
"""simple docstring"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE = 50_00_00_00 ) -> int:
"""simple docstring"""
UpperCamelCase__ = set()
UpperCamelCase__ = int((limit - 24) ** (1 / 2) )
UpperCamelCase__ = set(range(3 , prime_square_limit + 1 , 2 ) )
primes.add(2 )
for p in range(3 , prime_square_limit + 1 , 2 ):
if p not in primes:
continue
    primes.difference_update(set(range(p * p , prime_square_limit + 1 , p ) ) )
for primea in primes:
UpperCamelCase__ = primea * primea
for primea in primes:
UpperCamelCase__ = primea * primea * primea
if square + cube >= limit - 16:
break
for primea in primes:
UpperCamelCase__ = primea * primea * primea * primea
UpperCamelCase__ = square + cube + tetr
if total >= limit:
break
ret.add(SCREAMING_SNAKE_CASE )
return len(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 20 | 1 |
"""simple docstring"""
A__ : Dict= [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
A__ : Union[str, Any]= [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
A__ : Optional[int]= {
0: """Sunday""",
1: """Monday""",
2: """Tuesday""",
3: """Wednesday""",
4: """Thursday""",
5: """Friday""",
6: """Saturday""",
}
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
assert len(str(SCREAMING_SNAKE_CASE ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 12, "month should be between 1 to 12"
assert 1 <= day <= 31, "day should be between 1 to 31"
# Doomsday algorithm:
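    # Conway's doomsday rule: a fixed set of easy-to-remember dates in any year
    # all fall on the same weekday (the "doomsday"); compute that anchor from
    # the century and year, then offset to the requested day of the month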
UpperCamelCase__ = year // 1_00
UpperCamelCase__ = (5 * (century % 4) + 2) % 7
UpperCamelCase__ = year % 1_00
UpperCamelCase__ = centurian % 12
UpperCamelCase__ = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
UpperCamelCase__ = (
DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 4_00) != 0)
else DOOMSDAY_LEAP[month - 1]
)
UpperCamelCase__ = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
A__ : List[Any]= ["""bert-base-uncased""", """bert-base-cased"""]
A__ : Optional[int]= """hf-internal-testing/tiny-bert-tf-only"""
if is_tf_available():
class __lowerCamelCase ( tf.keras.Model ):
def __init__( self , snake_case_ ) -> Optional[int]:
super().__init__()
UpperCamelCase__ = tokenizer
UpperCamelCase__ = AutoConfig.from_pretrained(snake_case_ )
UpperCamelCase__ = TFAutoModel.from_config(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> int:
UpperCamelCase__ = self.tokenizer(snake_case_ )
UpperCamelCase__ = self.bert(**snake_case_ )
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class __lowerCamelCase ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
super().setUp()
UpperCamelCase__ = [
BertTokenizer.from_pretrained(snake_case_ ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
UpperCamelCase__ = [TFBertTokenizer.from_pretrained(snake_case_ ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(snake_case_ , use_fast_bert_tokenizer=snake_case_ )
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
UpperCamelCase__ = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
UpperCamelCase__ = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in (self.test_sentences, self.paired_sentences):
UpperCamelCase__ = tokenizer(snake_case_ , return_tensors='tf' , padding='longest' )
UpperCamelCase__ = tf_tokenizer(snake_case_ )
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
for tf_tokenizer in self.tf_tokenizers:
UpperCamelCase__ = tf_tokenizer(self.paired_sentences )
UpperCamelCase__ = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
for tf_tokenizer in self.tf_tokenizers:
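            # tracing the tokenizer with tf.function compiles it into a graph;
            # the graph outputs must match the eager outputs exactly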
UpperCamelCase__ = tf.function(snake_case_ )
for test_inputs in (self.test_sentences, self.paired_sentences):
UpperCamelCase__ = tf.constant(snake_case_ )
UpperCamelCase__ = compiled_tokenizer(snake_case_ )
UpperCamelCase__ = tf_tokenizer(snake_case_ )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
for tf_tokenizer in self.tf_tokenizers:
UpperCamelCase__ = ModelToSave(tokenizer=snake_case_ )
UpperCamelCase__ = tf.convert_to_tensor(self.test_sentences )
UpperCamelCase__ = model(snake_case_ ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
UpperCamelCase__ = Path(snake_case_ ) / 'saved.model'
model.save(snake_case_ )
UpperCamelCase__ = tf.keras.models.load_model(snake_case_ )
UpperCamelCase__ = loaded_model(snake_case_ )
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1E-5 )
| 20 | 1 |
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ = SwinConfig()
UpperCamelCase__ = swin_name.split('_' )
UpperCamelCase__ = name_split[1]
UpperCamelCase__ = int(name_split[4] )
UpperCamelCase__ = int(name_split[3][-1] )
if model_size == "tiny":
UpperCamelCase__ = 96
UpperCamelCase__ = (2, 2, 6, 2)
UpperCamelCase__ = (3, 6, 12, 24)
elif model_size == "small":
UpperCamelCase__ = 96
UpperCamelCase__ = (2, 2, 18, 2)
UpperCamelCase__ = (3, 6, 12, 24)
elif model_size == "base":
UpperCamelCase__ = 1_28
UpperCamelCase__ = (2, 2, 18, 2)
UpperCamelCase__ = (4, 8, 16, 32)
else:
UpperCamelCase__ = 1_92
UpperCamelCase__ = (2, 2, 18, 2)
UpperCamelCase__ = (6, 12, 24, 48)
if "in22k" in swin_name:
UpperCamelCase__ = 2_18_41
else:
UpperCamelCase__ = 10_00
UpperCamelCase__ = 'huggingface/label-files'
UpperCamelCase__ = 'imagenet-1k-id2label.json'
UpperCamelCase__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
UpperCamelCase__ = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
UpperCamelCase__ = idalabel
UpperCamelCase__ = {v: k for k, v in idalabel.items()}
UpperCamelCase__ = img_size
UpperCamelCase__ = num_classes
UpperCamelCase__ = embed_dim
UpperCamelCase__ = depths
UpperCamelCase__ = num_heads
UpperCamelCase__ = window_size
return config
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
if "patch_embed.proj" in name:
UpperCamelCase__ = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
UpperCamelCase__ = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
UpperCamelCase__ = 'encoder.' + name
if "attn.proj" in name:
UpperCamelCase__ = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
UpperCamelCase__ = name.replace('attn' , 'attention.self' )
if "norm1" in name:
UpperCamelCase__ = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
UpperCamelCase__ = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
UpperCamelCase__ = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
UpperCamelCase__ = name.replace('mlp.fc2' , 'output.dense' )
if name == "norm.weight":
UpperCamelCase__ = 'layernorm.weight'
if name == "norm.bias":
UpperCamelCase__ = 'layernorm.bias'
if "head" in name:
UpperCamelCase__ = name.replace('head' , 'classifier' )
else:
UpperCamelCase__ = 'swin.' + name
return name
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
UpperCamelCase__ = orig_state_dict.pop(SCREAMING_SNAKE_CASE )
if "mask" in key:
continue
elif "qkv" in key:
UpperCamelCase__ = key.split('.' )
UpperCamelCase__ = int(key_split[1] )
UpperCamelCase__ = int(key_split[3] )
UpperCamelCase__ = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
UpperCamelCase__ = val[:dim, :]
UpperCamelCase__ = val[
dim : dim * 2, :
]
UpperCamelCase__ = val[-dim:, :]
else:
UpperCamelCase__ = val[
:dim
]
UpperCamelCase__ = val[
dim : dim * 2
]
UpperCamelCase__ = val[
-dim:
]
else:
UpperCamelCase__ = val
return orig_state_dict
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
UpperCamelCase__ = timm.create_model(SCREAMING_SNAKE_CASE , pretrained=SCREAMING_SNAKE_CASE )
timm_model.eval()
UpperCamelCase__ = get_swin_config(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = SwinForImageClassification(SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase__ = convert_state_dict(timm_model.state_dict() , SCREAMING_SNAKE_CASE )
model.load_state_dict(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
UpperCamelCase__ = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-' ) ) )
UpperCamelCase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
UpperCamelCase__ = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='pt' )
UpperCamelCase__ = timm_model(inputs['pixel_values'] )
UpperCamelCase__ = model(**SCREAMING_SNAKE_CASE ).logits
assert torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-3 )
print(F'Saving model {swin_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
A__ : Optional[Any]= argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
A__ : Tuple= parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 20 |
"""simple docstring"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> list[int]:
"""simple docstring"""
UpperCamelCase__ = len(SCREAMING_SNAKE_CASE )
for i in range(SCREAMING_SNAKE_CASE ):
for j in range(i + 1 , SCREAMING_SNAKE_CASE ):
if numbers[j] < numbers[i]:
UpperCamelCase__ , UpperCamelCase__ = numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
A__ : Union[str, Any]= input("""Enter numbers separated by a comma:\n""").strip()
A__ : List[Any]= [int(item) for item in user_input.split(""",""")]
print(exchange_sort(unsorted))
| 20 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class __lowerCamelCase ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = tempfile.mkdtemp()
UpperCamelCase__ = BlipImageProcessor()
UpperCamelCase__ = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-BertModel' )
UpperCamelCase__ = BlipProcessor(snake_case_ , snake_case_ )
processor.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self , **snake_case_ ) -> List[Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case_ ).tokenizer
def SCREAMING_SNAKE_CASE__ ( self , **snake_case_ ) -> Any:
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case_ ).image_processor
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
UpperCamelCase__ = [Image.fromarray(np.moveaxis(snake_case_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase__ = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
UpperCamelCase__ = self.get_image_processor(do_normalize=snake_case_ , padding_value=1.0 )
UpperCamelCase__ = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=snake_case_ , padding_value=1.0 )
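        # kwargs passed to from_pretrained should override the tokenizer and
        # image-processor attributes that were saved to disk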
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = BlipProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
UpperCamelCase__ = self.prepare_image_inputs()
UpperCamelCase__ = image_processor(snake_case_ , return_tensors='np' )
UpperCamelCase__ = processor(images=snake_case_ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = BlipProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
UpperCamelCase__ = 'lower newer'
UpperCamelCase__ = processor(text=snake_case_ )
UpperCamelCase__ = tokenizer(snake_case_ , return_token_type_ids=snake_case_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = BlipProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
UpperCamelCase__ = 'lower newer'
UpperCamelCase__ = self.prepare_image_inputs()
UpperCamelCase__ = processor(text=snake_case_ , images=snake_case_ )
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] )
# test if it raises when no input is passed
with pytest.raises(snake_case_ ):
processor()
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = BlipProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
UpperCamelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase__ = processor.batch_decode(snake_case_ )
UpperCamelCase__ = tokenizer.batch_decode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = BlipProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
UpperCamelCase__ = 'lower newer'
UpperCamelCase__ = self.prepare_image_inputs()
UpperCamelCase__ = processor(text=snake_case_ , images=snake_case_ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] )
| 20 |
"""simple docstring"""
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
A__ : str= {
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
A__ : str= {
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
UpperCamelCase__ = (images / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase__ = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
UpperCamelCase__ = numpy_to_pil(SCREAMING_SNAKE_CASE )
return images
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
if images.ndim == 3:
UpperCamelCase__ = images[None, ...]
UpperCamelCase__ = (images * 2_55).round().astype('uint8' )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
UpperCamelCase__ = [Image.fromarray(image.squeeze() , mode='L' ) for image in images]
else:
UpperCamelCase__ = [Image.fromarray(SCREAMING_SNAKE_CASE ) for image in images]
return pil_images
| 20 | 1 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : List[Any]= logging.get_logger(__name__)
A__ : List[str]= {
"""google/pix2struct-textcaps-base""": (
"""https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"""
),
}
class __lowerCamelCase ( _a ):
a : List[str] ="""pix2struct_text_model"""
a : Any =["""past_key_values"""]
a : str ={
"""hidden_size""": """hidden_size""",
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self , snake_case_=5_0244 , snake_case_=768 , snake_case_=64 , snake_case_=2048 , snake_case_=12 , snake_case_=12 , snake_case_=32 , snake_case_=128 , snake_case_=0.1 , snake_case_=1E-6 , snake_case_=1.0 , snake_case_="gelu_new" , snake_case_=0 , snake_case_=False , snake_case_=0 , snake_case_=1 , snake_case_=False , snake_case_=True , **snake_case_ , ) -> List[Any]:
UpperCamelCase__ = vocab_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = d_kv
UpperCamelCase__ = d_ff
UpperCamelCase__ = num_layers
UpperCamelCase__ = num_heads
UpperCamelCase__ = relative_attention_num_buckets
UpperCamelCase__ = relative_attention_max_distance
UpperCamelCase__ = dropout_rate
UpperCamelCase__ = layer_norm_epsilon
UpperCamelCase__ = initializer_factor
UpperCamelCase__ = use_cache
UpperCamelCase__ = eos_token_id
UpperCamelCase__ = decoder_start_token_id
# for backwards compatibility
UpperCamelCase__ = dense_act_fn
super().__init__(
pad_token_id=snake_case_ , eos_token_id=snake_case_ , decoder_start_token_id=snake_case_ , tie_word_embeddings=snake_case_ , is_decoder=snake_case_ , **snake_case_ , )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , snake_case_ , **snake_case_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(snake_case_ )
UpperCamelCase__ , UpperCamelCase__ = cls.get_config_dict(snake_case_ , **snake_case_ )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get('model_type' ) == "pix2struct":
UpperCamelCase__ = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(snake_case_ , **snake_case_ )
class __lowerCamelCase ( _a ):
a : Tuple ="""pix2struct_vision_model"""
def __init__( self , snake_case_=768 , snake_case_=768 , snake_case_=2048 , snake_case_=64 , snake_case_=12 , snake_case_=12 , snake_case_="gelu_new" , snake_case_=1E-6 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=1E-10 , snake_case_=1.0 , snake_case_=4096 , snake_case_=32 , snake_case_=128 , **snake_case_ , ) -> Union[str, Any]:
super().__init__(**snake_case_ )
UpperCamelCase__ = hidden_size
UpperCamelCase__ = patch_embed_hidden_size
UpperCamelCase__ = d_ff
UpperCamelCase__ = dropout_rate
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = initializer_range
UpperCamelCase__ = initializer_factor
UpperCamelCase__ = attention_dropout
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = dense_act_fn
UpperCamelCase__ = seq_len
UpperCamelCase__ = relative_attention_num_buckets
UpperCamelCase__ = relative_attention_max_distance
UpperCamelCase__ = d_kv
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , snake_case_ , **snake_case_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(snake_case_ )
UpperCamelCase__ , UpperCamelCase__ = cls.get_config_dict(snake_case_ , **snake_case_ )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get('model_type' ) == "pix2struct":
UpperCamelCase__ = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(snake_case_ , **snake_case_ )
class __lowerCamelCase ( _a ):
a : Any ="""pix2struct"""
a : Any =True
def __init__( self , snake_case_=None , snake_case_=None , snake_case_=1.0 , snake_case_=0.02 , snake_case_=False , snake_case_=False , snake_case_=True , **snake_case_ , ) -> List[str]:
super().__init__(tie_word_embeddings=snake_case_ , is_encoder_decoder=snake_case_ , **snake_case_ )
if text_config is None:
UpperCamelCase__ = {}
logger.info('text_config is None. Initializing the Pix2StructTextConfig with default values.' )
if vision_config is None:
UpperCamelCase__ = {}
logger.info('vision_config is None. Initializing the Pix2StructVisionConfig with default values.' )
UpperCamelCase__ = PixaStructTextConfig(**snake_case_ )
UpperCamelCase__ = PixaStructVisionConfig(**snake_case_ )
UpperCamelCase__ = self.text_config.decoder_start_token_id
UpperCamelCase__ = self.text_config.pad_token_id
UpperCamelCase__ = self.text_config.eos_token_id
UpperCamelCase__ = initializer_factor
UpperCamelCase__ = initializer_range
UpperCamelCase__ = self.initializer_range
UpperCamelCase__ = self.initializer_range
UpperCamelCase__ = is_vqa
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , snake_case_ , snake_case_ , **snake_case_ ) -> List[str]:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
UpperCamelCase__ = copy.deepcopy(self.__dict__ )
UpperCamelCase__ = self.text_config.to_dict()
UpperCamelCase__ = self.vision_config.to_dict()
UpperCamelCase__ = self.__class__.model_type
return output
| 20 |
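The three classes above follow transformers' composite-config pattern: a top-level Pix2StructConfig owns a text and a vision sub-config and re-exposes shared special-token ids. A hedged usage sketch, assuming a transformers version (>= 4.27) that ships Pix2Struct:

from transformers import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig

text_cfg = Pix2StructTextConfig(num_layers=2, num_heads=2, hidden_size=64)
vision_cfg = Pix2StructVisionConfig(num_hidden_layers=2, num_attention_heads=2, hidden_size=64)

cfg = Pix2StructConfig.from_text_vision_configs(text_cfg, vision_cfg)
assert cfg.decoder_start_token_id == text_cfg.decoder_start_token_id
print(sorted(cfg.to_dict()))  # includes 'text_config' and 'vision_config'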
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
A__ : Dict= logging.get_logger(__name__)
A__ : str= {"""vocab_file""": """spiece.model"""}
A__ : Union[str, Any]= {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
}
}
# TODO(PVP) - this should be removed in Transformers v5
A__ : Union[str, Any]= {
"""t5-small""": 5_12,
"""t5-base""": 5_12,
"""t5-large""": 5_12,
"""t5-3b""": 5_12,
"""t5-11b""": 5_12,
}
A__ : Optional[Any]= """▁"""
class __lowerCamelCase ( _a ):
a : Dict =VOCAB_FILES_NAMES
a : str =PRETRAINED_VOCAB_FILES_MAP
a : Union[str, Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : List[str] =["""input_ids""", """attention_mask"""]
def __init__( self , snake_case_ , snake_case_="</s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_=100 , snake_case_=None , snake_case_ = None , snake_case_=True , **snake_case_ , ) -> None:
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
UpperCamelCase__ = [F'<extra_id_{i}>' for i in range(snake_case_ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
UpperCamelCase__ = len(set(filter(lambda snake_case_ : bool('extra_id' in str(snake_case_ ) ) , snake_case_ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
' tokens' )
if legacy:
logger.warning_once(
F'You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'
' read the related pull request available at https://github.com/huggingface/transformers/pull/24565' )
UpperCamelCase__ = legacy
UpperCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , extra_ids=snake_case_ , additional_special_tokens=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , legacy=snake_case_ , **snake_case_ , )
UpperCamelCase__ = vocab_file
UpperCamelCase__ = extra_ids
UpperCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case_ )
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case_ , snake_case_ , snake_case_ ) -> Optional[Any]:
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
UpperCamelCase__ = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'This tokenizer was incorrectly instantiated with a model max length of'
F' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
F' {pretrained_model_name_or_path} automatically truncating your input to'
F' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
F' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
' instantiate this tokenizer with `model_max_length` set to your preferred value.' , snake_case_ , )
return max_model_length
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
return self.sp_model.get_piece_size() + self._extra_ids
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = {self.convert_ids_to_tokens(snake_case_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None , snake_case_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(snake_case_ )) + [1]
return ([0] * len(snake_case_ )) + [1] + ([0] * len(snake_case_ )) + [1]
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
return list(
set(filter(lambda snake_case_ : bool(re.search(r'<extra_id_\d+>' , snake_case_ ) ) is not None , self.additional_special_tokens ) ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
return [self._convert_token_to_id(snake_case_ ) for token in self.get_sentinel_tokens()]
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> List[int]:
if len(snake_case_ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
' eos tokens being added.' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None ) -> List[int]:
UpperCamelCase__ = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None ) -> List[int]:
UpperCamelCase__ = self._add_eos_if_not_present(snake_case_ )
if token_ids_a is None:
return token_ids_a
else:
UpperCamelCase__ = self._add_eos_if_not_present(snake_case_ )
return token_ids_a + token_ids_a
def __getstate__( self ) -> str:
UpperCamelCase__ = self.__dict__.copy()
UpperCamelCase__ = None
return state
def __setstate__( self , snake_case_ ) -> Any:
UpperCamelCase__ = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
UpperCamelCase__ = {}
UpperCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , **snake_case_ ) -> List[str]:
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
UpperCamelCase__ = SPIECE_UNDERLINE + text.replace(snake_case_ , ' ' )
return super().tokenize(snake_case_ , **snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , **snake_case_ ) -> List[Any]:
if not self.legacy:
UpperCamelCase__ = text.startswith(snake_case_ )
if is_first:
UpperCamelCase__ = text[1:]
UpperCamelCase__ = self.sp_model.encode(snake_case_ , out_type=snake_case_ )
if not self.legacy and not is_first and not text.startswith(' ' ) and tokens[0].startswith(snake_case_ ):
UpperCamelCase__ = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> List[str]:
if token.startswith('<extra_id_' ):
UpperCamelCase__ = re.match(r'<extra_id_(\d+)>' , snake_case_ )
UpperCamelCase__ = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Optional[int]:
if index < self.sp_model.get_piece_size():
UpperCamelCase__ = self.sp_model.IdToPiece(snake_case_ )
else:
UpperCamelCase__ = F'<extra_id_{self.vocab_size - 1 - index}>'
return token
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> List[str]:
UpperCamelCase__ = []
UpperCamelCase__ = ''
UpperCamelCase__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(snake_case_ ) + token
UpperCamelCase__ = True
UpperCamelCase__ = []
else:
current_sub_tokens.append(snake_case_ )
UpperCamelCase__ = False
out_string += self.sp_model.decode(snake_case_ )
return out_string.strip()
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]:
if not os.path.isdir(snake_case_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
UpperCamelCase__ = os.path.join(
snake_case_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case_ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case_ , 'wb' ) as fi:
UpperCamelCase__ = self.sp_model.serialized_model_proto()
fi.write(snake_case_ )
return (out_vocab_file,)
| 20 | 1 |
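The tokenizer above appends extra_ids sentinel tokens (<extra_id_0> ... <extra_id_99>) used for T5's span corruption, mapping them to the top of the id space (vocab_size - num - 1, as in the token-to-id branch). A usage sketch against the real T5Tokenizer; it requires sentencepiece and downloads spiece.model on first use:

from transformers import T5Tokenizer

tok = T5Tokenizer.from_pretrained("t5-small")
ids = tok("The <extra_id_0> walks in <extra_id_1> park").input_ids
print(tok.convert_ids_to_tokens(ids))
# ['▁The', '<extra_id_0>', '▁walks', '▁in', '<extra_id_1>', '▁park', '</s>']
print(tok.convert_tokens_to_ids("<extra_id_0>"))  # 32099, i.e. vocab_size - 0 - 1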
"""simple docstring"""
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __lowerCamelCase ( unittest.TestCase ):
def __init__( self , snake_case_ , snake_case_=7 , snake_case_=3 , snake_case_=18 , snake_case_=30 , snake_case_=400 , snake_case_=True , snake_case_=None , snake_case_=True , snake_case_=[0.5, 0.5, 0.5] , snake_case_=[0.5, 0.5, 0.5] , ) -> Optional[int]:
UpperCamelCase__ = size if size is not None else {'height': 18, 'width': 18}
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = image_size
UpperCamelCase__ = min_resolution
UpperCamelCase__ = max_resolution
UpperCamelCase__ = do_resize
UpperCamelCase__ = size
UpperCamelCase__ = do_normalize
UpperCamelCase__ = image_mean
UpperCamelCase__ = image_std
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class __lowerCamelCase ( _a , unittest.TestCase ):
a : Dict =DPTImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
UpperCamelCase__ = DPTImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case_ , 'image_mean' ) )
self.assertTrue(hasattr(snake_case_ , 'image_std' ) )
self.assertTrue(hasattr(snake_case_ , 'do_normalize' ) )
self.assertTrue(hasattr(snake_case_ , 'do_resize' ) )
self.assertTrue(hasattr(snake_case_ , 'size' ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
UpperCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
# Initialize image_processing
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , Image.Image )
# Test not batched input
UpperCamelCase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
UpperCamelCase__ = image_processing(snake_case_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
# Initialize image_processing
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ , numpify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , np.ndarray )
# Test not batched input
UpperCamelCase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
UpperCamelCase__ = image_processing(snake_case_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
# Initialize image_processing
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ , torchify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , torch.Tensor )
# Test not batched input
UpperCamelCase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
UpperCamelCase__ = image_processing(snake_case_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
| 20 |
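A minimal usage sketch for the processor exercised by the tests above (hedged: default rescale/normalize settings assumed). It resizes a PIL image to the configured size and returns a channels-first tensor batch:

import numpy as np
from PIL import Image
from transformers import DPTImageProcessor

processor = DPTImageProcessor(size={"height": 18, "width": 18})
image = Image.fromarray(np.random.randint(0, 256, (30, 40, 3), dtype=np.uint8))
pixel_values = processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])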
"""simple docstring"""
from __future__ import annotations
from typing import Generic, TypeVar
A__ : Any= TypeVar("""T""")
class __lowerCamelCase ( Generic[T] ):
def __init__( self , snake_case_ ) -> None:
UpperCamelCase__ = data
UpperCamelCase__ = self
UpperCamelCase__ = 0
class __lowerCamelCase ( Generic[T] ):
def __init__( self ) -> None:
# map from node name to the node object
UpperCamelCase__ = {}
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> None:
# create a new set with x as its member
UpperCamelCase__ = DisjointSetTreeNode(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> DisjointSetTreeNode[T]:
# find the set x belongs to (with path-compression)
UpperCamelCase__ = self.map[data]
if elem_ref != elem_ref.parent:
UpperCamelCase__ = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> None:
# helper function for union operation
if nodea.rank > nodea.rank:
UpperCamelCase__ = nodea
else:
UpperCamelCase__ = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> None:
# merge 2 disjoint sets
self.link(self.find_set(snake_case_ ) , self.find_set(snake_case_ ) )
class __lowerCamelCase ( Generic[T] ):
def __init__( self ) -> None:
# connections: map from the node to the neighbouring nodes (with weights)
UpperCamelCase__ = {}
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> None:
        # add a node ONLY if it's not present in the graph
if node not in self.connections:
UpperCamelCase__ = {}
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> None:
# add an edge with the given weight
self.add_node(snake_case_ )
self.add_node(snake_case_ )
UpperCamelCase__ = weight
UpperCamelCase__ = weight
def SCREAMING_SNAKE_CASE__ ( self ) -> GraphUndirectedWeighted[T]:
UpperCamelCase__ = []
UpperCamelCase__ = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
        edges.sort(key=lambda snake_case_ : snake_case_[2] )
# creating the disjoint set
UpperCamelCase__ = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(snake_case_ )
# MST generation
UpperCamelCase__ = 0
UpperCamelCase__ = 0
UpperCamelCase__ = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = edges[index]
index += 1
UpperCamelCase__ = disjoint_set.find_set(snake_case_ )
UpperCamelCase__ = disjoint_set.find_set(snake_case_ )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(snake_case_ , snake_case_ , snake_case_ )
disjoint_set.union(snake_case_ , snake_case_ )
return graph
| 20 | 1 |
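The snippet above builds a minimum spanning tree with Kruskal's algorithm: sort edges by weight, then accept an edge only if its endpoints lie in different components of a rank-balanced, path-compressing disjoint set. A compact runnable de-obfuscation (names are mine, not from the snippet):

def kruskal_mst(nodes, edges):
    """edges: iterable of (u, v, weight); returns the MST's edge list."""
    parent = {n: n for n in nodes}
    rank = {n: 0 for n in nodes}

    def find(x):                              # find with path compression
        while parent[x] != x:
            parent[x] = parent[parent[x]]
            x = parent[x]
        return x

    def union(a, b):                          # union by rank
        ra, rb = find(a), find(b)
        if ra == rb:
            return False
        if rank[ra] < rank[rb]:
            ra, rb = rb, ra
        parent[rb] = ra
        if rank[ra] == rank[rb]:
            rank[ra] += 1
        return True

    mst = []
    for u, v, w in sorted(edges, key=lambda e: e[2]):
        if union(u, v):                       # accept edge iff it joins two components
            mst.append((u, v, w))
    return mst

print(kruskal_mst("abcd", [("a", "b", 1), ("b", "c", 2), ("a", "c", 3), ("c", "d", 1)]))
# [('a', 'b', 1), ('c', 'd', 1), ('b', 'c', 2)]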
"""simple docstring"""
from itertools import product
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> list[int]:
"""simple docstring"""
UpperCamelCase__ = sides_number
UpperCamelCase__ = max_face_number * dice_number
UpperCamelCase__ = [0] * (max_total + 1)
UpperCamelCase__ = 1
UpperCamelCase__ = range(SCREAMING_SNAKE_CASE , max_face_number + 1 )
for dice_numbers in product(SCREAMING_SNAKE_CASE , repeat=SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = sum(SCREAMING_SNAKE_CASE )
totals_frequencies[total] += 1
return totals_frequencies
def lowerCAmelCase_( ) -> float:
"""simple docstring"""
UpperCamelCase__ = total_frequency_distribution(
sides_number=4 , dice_number=9 )
UpperCamelCase__ = total_frequency_distribution(
sides_number=6 , dice_number=6 )
UpperCamelCase__ = 0
UpperCamelCase__ = 9
UpperCamelCase__ = 4 * 9
UpperCamelCase__ = 6
for peter_total in range(SCREAMING_SNAKE_CASE , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
UpperCamelCase__ = (4**9) * (6**6)
UpperCamelCase__ = peter_wins_count / total_games_number
UpperCamelCase__ = round(SCREAMING_SNAKE_CASE , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(F"""{solution() = }""")
| 20 |
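The enumeration above solves Project Euler 205 (Peter's nine four-sided dice vs. Colin's six six-sided dice). A cross-check sketch that builds each total-sum distribution by repeated convolution, which costs O(sides * dice * max_total) instead of the O(sides ** dice) of itertools.product:

from collections import Counter

def sum_distribution(sides: int, dice: int) -> Counter:
    dist = Counter({0: 1})
    for _ in range(dice):                 # convolve with one more die
        nxt = Counter()
        for total, count in dist.items():
            for face in range(1, sides + 1):
                nxt[total + face] += count
        dist = nxt
    return dist

peter = sum_distribution(4, 9)
colin = sum_distribution(6, 6)
wins = sum(pc * cc for pt, pc in peter.items() for ct, cc in colin.items() if pt > ct)
print(round(wins / (4**9 * 6**6), 7))     # 0.5731441, matching solution() above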
"""simple docstring"""
A__ : Tuple= """Alexander Joslin"""
import operator as op
from .stack import Stack
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
UpperCamelCase__ = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
UpperCamelCase__ = Stack()
UpperCamelCase__ = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(SCREAMING_SNAKE_CASE ) )
elif i in operators:
# RULE 2
operator_stack.push(SCREAMING_SNAKE_CASE )
elif i == ")":
# RULE 4
UpperCamelCase__ = operator_stack.peek()
operator_stack.pop()
UpperCamelCase__ = operand_stack.peek()
operand_stack.pop()
UpperCamelCase__ = operand_stack.peek()
operand_stack.pop()
UpperCamelCase__ = operators[opr](SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
operand_stack.push(SCREAMING_SNAKE_CASE )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
A__ : int= """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 20 | 1 |
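A compact de-obfuscation of Dijkstra's two-stack evaluator above, using plain Python lists as stacks; like the demo string, it assumes a fully parenthesized expression with single-digit operands:

import operator as op

def two_stack_eval(equation: str) -> float:
    ops = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operands, operators = [], []
    for ch in equation:
        if ch.isdigit():
            operands.append(int(ch))                      # RULE 1: push operand
        elif ch in ops:
            operators.append(ch)                          # RULE 2: push operator
        elif ch == ")":                                   # RULE 4: reduce on ')'
            right, left = operands.pop(), operands.pop()
            operands.append(ops[operators.pop()](left, right))
    return operands[-1]                                   # RULE 5: result on top

print(two_stack_eval("(5 + ((4 * 2) * (2 + 3)))"))        # 45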
"""simple docstring"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> float:
"""simple docstring"""
if density <= 0:
raise ValueError('Impossible fluid density' )
if bulk_modulus <= 0:
raise ValueError('Impossible bulk modulus' )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 |
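A worked example for the formula above, c = sqrt(K / rho): the speed of sound in water near 20 °C, using the textbook values K ≈ 2.15 GPa and rho ≈ 998 kg/m³ (quoted here as assumptions):

bulk_modulus = 2.15e9          # Pa
density = 998.0                # kg / m^3
speed = (bulk_modulus / density) ** 0.5
print(f"{speed:.0f} m/s")      # ~1468 m/s, close to the measured ~1480 m/s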
"""simple docstring"""
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
A__ : Any= """src/diffusers"""
# Matches is_xxx_available()
A__ : Tuple= re.compile(r"""is\_([a-z_]*)_available\(\)""")
# Matches from xxx import bla
A__ : Any= re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
A__ : Optional[Any]= """
{0} = None
"""
A__ : List[Any]= """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
"""
A__ : Dict= """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ = _re_backend.findall(SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) == 0:
return None
return "_and_".join(SCREAMING_SNAKE_CASE )
def lowerCAmelCase_( ) -> str:
"""simple docstring"""
with open(os.path.join(SCREAMING_SNAKE_CASE , '__init__.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
UpperCamelCase__ = f.readlines()
# Get to the point we do the actual imports for type checking
UpperCamelCase__ = 0
UpperCamelCase__ = {}
# Go through the end of the file
while line_index < len(SCREAMING_SNAKE_CASE ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
UpperCamelCase__ = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith('else:' ):
line_index += 1
line_index += 1
UpperCamelCase__ = []
# Until we unindent, add backend objects to the list
while line_index < len(SCREAMING_SNAKE_CASE ) and len(lines[line_index] ) > 1:
UpperCamelCase__ = lines[line_index]
UpperCamelCase__ = _re_single_line_import.search(SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(SCREAMING_SNAKE_CASE ) > 0:
UpperCamelCase__ = objects
else:
line_index += 1
return backend_specific_objects
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
if name.isupper():
return DUMMY_CONSTANT.format(SCREAMING_SNAKE_CASE )
elif name.islower():
return DUMMY_FUNCTION.format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
return DUMMY_CLASS.format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE=None ) -> int:
"""simple docstring"""
if backend_specific_objects is None:
UpperCamelCase__ = read_init()
# For special correspondence backend to module name as used in the function requires_modulename
UpperCamelCase__ = {}
for backend, objects in backend_specific_objects.items():
UpperCamelCase__ = '[' + ', '.join(F'"{b}"' for b in backend.split('_and_' ) ) + ']'
UpperCamelCase__ = '# This file is autogenerated by the command `make fix-copies`, do not edit.\n'
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for o in objects] )
UpperCamelCase__ = dummy_file
return dummy_files
def lowerCAmelCase_( SCREAMING_SNAKE_CASE=False ) -> int:
"""simple docstring"""
UpperCamelCase__ = create_dummy_files()
# For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
UpperCamelCase__ = {'torch': 'pt'}
# Locate actual dummy modules and read their content.
UpperCamelCase__ = os.path.join(SCREAMING_SNAKE_CASE , 'utils' )
UpperCamelCase__ = {
backend: os.path.join(SCREAMING_SNAKE_CASE , F'dummy_{short_names.get(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}_objects.py' )
for backend in dummy_files.keys()
}
UpperCamelCase__ = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(SCREAMING_SNAKE_CASE ):
with open(SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' , newline='\n' ) as f:
UpperCamelCase__ = f.read()
else:
UpperCamelCase__ = ''
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F'Updating diffusers.utils.dummy_{short_names.get(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}_objects.py as the main '
'__init__ has new objects.' )
with open(dummy_file_paths[backend] , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
'The main __init__ has objects that are not present in '
F'diffusers.utils.dummy_{short_names.get(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}_objects.py. Run `make fix-copies` '
'to fix this.' )
if __name__ == "__main__":
A__ : Any= argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
A__ : Optional[int]= parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 20 | 1 |
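The script above regenerates dummy_*_objects.py placeholders so that importing diffusers without an optional backend fails lazily with a helpful error. A quick demonstration of how the templates are chosen by the casing of the object's name (the names below are made up for illustration):

DUMMY_CONSTANT = "\n{0} = None\n"
DUMMY_FUNCTION = "\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n"

print(DUMMY_CONSTANT.format("ONNX_WEIGHTS_NAME"))            # isupper() -> constant
print(DUMMY_FUNCTION.format("get_scheduler", '["torch"]'))   # islower() -> function
# Mixed-case names such as "UNet2DModel" fall through to the DummyObject class template.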
"""simple docstring"""
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
A__ : List[str]= None
try:
import msvcrt
except ImportError:
A__ : Dict= None
try:
import fcntl
except ImportError:
A__ : Dict= None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
A__ : Optional[int]= OSError
# Data
# ------------------------------------------------
A__ : str= [
"""Timeout""",
"""BaseFileLock""",
"""WindowsFileLock""",
"""UnixFileLock""",
"""SoftFileLock""",
"""FileLock""",
]
A__ : Union[str, Any]= """3.0.12"""
A__ : Any= None
def lowerCAmelCase_( ) -> int:
"""simple docstring"""
global _logger
UpperCamelCase__ = _logger or logging.getLogger(__name__ )
return _logger
class __lowerCamelCase ( _a ):
def __init__( self , snake_case_ ) -> Any:
UpperCamelCase__ = lock_file
return None
def __str__( self ) -> Optional[int]:
UpperCamelCase__ = F'The file lock \'{self.lock_file}\' could not be acquired.'
return temp
class __lowerCamelCase :
def __init__( self , snake_case_ ) -> List[Any]:
UpperCamelCase__ = lock
return None
def __enter__( self ) -> int:
return self.lock
def __exit__( self , snake_case_ , snake_case_ , snake_case_ ) -> Dict:
self.lock.release()
return None
class __lowerCamelCase :
def __init__( self , snake_case_ , snake_case_=-1 , snake_case_=None ) -> Dict:
UpperCamelCase__ = max_filename_length if max_filename_length is not None else 255
# Hash the filename if it's too long
UpperCamelCase__ = self.hash_filename_if_too_long(snake_case_ , snake_case_ )
# The path to the lock file.
UpperCamelCase__ = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
UpperCamelCase__ = None
# The default timeout value.
UpperCamelCase__ = timeout
# We use this lock primarily for the lock counter.
UpperCamelCase__ = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
UpperCamelCase__ = 0
return None
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
return self._lock_file
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
return self._timeout
@timeout.setter
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> List[str]:
UpperCamelCase__ = float(snake_case_ )
return None
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
raise NotImplementedError()
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
raise NotImplementedError()
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
return self._lock_file_fd is not None
def SCREAMING_SNAKE_CASE__ ( self , snake_case_=None , snake_case_=0.05 ) -> int:
# Use the default timeout, if no timeout is provided.
if timeout is None:
UpperCamelCase__ = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
UpperCamelCase__ = id(self )
UpperCamelCase__ = self._lock_file
UpperCamelCase__ = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(F'Attempting to acquire lock {lock_id} on {lock_filename}' )
self._acquire()
if self.is_locked:
logger().debug(F'Lock {lock_id} acquired on {lock_filename}' )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(F'Timeout on acquiring lock {lock_id} on {lock_filename}' )
raise Timeout(self._lock_file )
else:
logger().debug(
F'Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...' )
time.sleep(snake_case_ )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
UpperCamelCase__ = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_=False ) -> Tuple:
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
UpperCamelCase__ = id(self )
UpperCamelCase__ = self._lock_file
logger().debug(F'Attempting to release lock {lock_id} on {lock_filename}' )
self._release()
UpperCamelCase__ = 0
logger().debug(F'Lock {lock_id} released on {lock_filename}' )
return None
def __enter__( self ) -> List[Any]:
self.acquire()
return self
def __exit__( self , snake_case_ , snake_case_ , snake_case_ ) -> Union[str, Any]:
self.release()
return None
def __del__( self ) -> Optional[Any]:
self.release(force=snake_case_ )
return None
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> str:
UpperCamelCase__ = os.path.basename(snake_case_ )
if len(snake_case_ ) > max_length and max_length > 0:
UpperCamelCase__ = os.path.dirname(snake_case_ )
UpperCamelCase__ = str(hash(snake_case_ ) )
UpperCamelCase__ = filename[: max_length - len(snake_case_ ) - 8] + '...' + hashed_filename + '.lock'
return os.path.join(snake_case_ , snake_case_ )
else:
return path
class __lowerCamelCase ( _a ):
def __init__( self , snake_case_ , snake_case_=-1 , snake_case_=None ) -> Dict:
from .file_utils import relative_to_absolute_path
super().__init__(snake_case_ , timeout=snake_case_ , max_filename_length=snake_case_ )
UpperCamelCase__ = '\\\\?\\' + relative_to_absolute_path(self.lock_file )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
UpperCamelCase__ = os.open(self._lock_file , snake_case_ )
except OSError:
pass
else:
try:
msvcrt.locking(snake_case_ , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(snake_case_ )
else:
UpperCamelCase__ = fd
return None
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self._lock_file_fd
UpperCamelCase__ = None
msvcrt.locking(snake_case_ , msvcrt.LK_UNLCK , 1 )
os.close(snake_case_ )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class __lowerCamelCase ( _a ):
def __init__( self , snake_case_ , snake_case_=-1 , snake_case_=None ) -> int:
UpperCamelCase__ = os.statvfs(os.path.dirname(snake_case_ ) ).f_namemax
super().__init__(snake_case_ , timeout=snake_case_ , max_filename_length=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
UpperCamelCase__ = os.O_RDWR | os.O_CREAT | os.O_TRUNC
UpperCamelCase__ = os.open(self._lock_file , snake_case_ )
try:
fcntl.flock(snake_case_ , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(snake_case_ )
else:
UpperCamelCase__ = fd
return None
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
# Do not remove the lockfile:
#
# https://github.com/benediktschmitt/py-filelock/issues/31
# https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
UpperCamelCase__ = self._lock_file_fd
UpperCamelCase__ = None
fcntl.flock(snake_case_ , fcntl.LOCK_UN )
os.close(snake_case_ )
return None
class __lowerCamelCase ( _a ):
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
UpperCamelCase__ = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
UpperCamelCase__ = os.open(self._lock_file , snake_case_ )
except OSError:
pass
else:
UpperCamelCase__ = fd
return None
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
os.close(self._lock_file_fd )
UpperCamelCase__ = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
A__ : Union[str, Any]= None
if msvcrt:
A__ : List[Any]= WindowsFileLock
elif fcntl:
A__ : Optional[Any]= UnixFileLock
else:
A__ : Dict= SoftFileLock
if warnings is not None:
warnings.warn("""only soft file lock is available""")
| 20 |
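The module above vendors the py-filelock inter-process lock: FileLock resolves to the msvcrt backend on Windows, fcntl on Unix, and the soft (create-file) lock otherwise. A hedged usage sketch against the standalone filelock package, whose API this copy mirrors; release happens even on exceptions because acquisition is a context manager:

from filelock import FileLock, Timeout

lock = FileLock("shared.txt.lock", timeout=1)   # seconds to wait before raising Timeout
try:
    with lock:                                  # acquire() on enter, release() on exit
        with open("shared.txt", "a") as f:
            f.write("exclusive write\n")
except Timeout:
    print("another process holds the lock")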
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
A__ : Optional[Any]= """Run commands across TPU VMs for initial setup before running `accelerate launch`."""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE=None ) -> Dict:
"""simple docstring"""
if subparsers is not None:
UpperCamelCase__ = subparsers.add_parser('tpu-config' , description=_description )
else:
UpperCamelCase__ = argparse.ArgumentParser('Accelerate tpu-config command' , description=_description )
# Core arguments
UpperCamelCase__ = parser.add_argument_group(
'Config Arguments' , 'Arguments that can be configured through `accelerate config`.' )
config_args.add_argument(
'--config_file' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='Path to the config file to use for accelerate.' , )
config_args.add_argument(
'--tpu_name' , default=SCREAMING_SNAKE_CASE , help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.' , )
config_args.add_argument(
'--tpu_zone' , default=SCREAMING_SNAKE_CASE , help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.' , )
UpperCamelCase__ = parser.add_argument_group('TPU Arguments' , 'Arguments for options ran inside the TPU.' )
pod_args.add_argument(
'--use_alpha' , action='store_true' , help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.' , )
pod_args.add_argument(
'--command_file' , default=SCREAMING_SNAKE_CASE , help='The path to the file containing the commands to run on the pod on startup.' , )
pod_args.add_argument(
'--command' , action='append' , nargs='+' , help='A command to run on the pod. Can be passed multiple times.' , )
pod_args.add_argument(
'--install_accelerate' , action='store_true' , help='Whether to install accelerate on the pod. Defaults to False.' , )
pod_args.add_argument(
'--accelerate_version' , default='latest' , help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.' , )
pod_args.add_argument(
'--debug' , action='store_true' , help='If set, will print the command that would be run instead of running it.' )
if subparsers is not None:
parser.set_defaults(func=SCREAMING_SNAKE_CASE )
return parser
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
UpperCamelCase__ = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
UpperCamelCase__ = defaults.command_file
if not args.command and defaults.commands is not None:
UpperCamelCase__ = defaults.commands
if not args.tpu_name:
UpperCamelCase__ = defaults.tpu_name
if not args.tpu_zone:
UpperCamelCase__ = defaults.tpu_zone
if args.accelerate_version == "dev":
UpperCamelCase__ = 'git+https://github.com/huggingface/accelerate.git'
elif args.accelerate_version == "latest":
UpperCamelCase__ = 'accelerate -U'
elif isinstance(parse(args.accelerate_version ) , SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = F'accelerate=={args.accelerate_version}'
if not args.command_file and not args.command:
raise ValueError('You must specify either a command file or a command to run on the pod.' )
if args.command_file:
with open(args.command_file , 'r' ) as f:
UpperCamelCase__ = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
UpperCamelCase__ = ['cd /usr/share']
if args.install_accelerate:
new_cmd += [F'pip install {args.accelerate_version}']
new_cmd += args.command
UpperCamelCase__ = '; '.join(SCREAMING_SNAKE_CASE )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
UpperCamelCase__ = ['gcloud']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(F'Running {" ".join(SCREAMING_SNAKE_CASE )}' )
return
subprocess.run(SCREAMING_SNAKE_CASE )
print('Successfully setup pod.' )
def lowerCAmelCase_( ) -> int:
"""simple docstring"""
UpperCamelCase__ = tpu_command_parser()
UpperCamelCase__ = parser.parse_args()
tpu_command_launcher(SCREAMING_SNAKE_CASE )
| 20 | 1 |
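A minimal re-creation of the command assembly above, to show the shape of the final gcloud invocation (the TPU name, zone, and training commands are placeholder assumptions):

commands = ["pip install accelerate -U", "python train.py"]
command = "; ".join(["cd /usr/share"] + commands)     # default shared folder, as above
cmd = ["gcloud", "compute", "tpus", "tpu-vm", "ssh", "my-tpu",
       "--zone", "us-central1-a", "--command", command, "--worker", "all"]
print(" ".join(cmd))                                  # what passing --debug would print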
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class __lowerCamelCase ( unittest.TestCase ):
def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=5 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=4 , ) -> List[Any]:
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = seq_length
UpperCamelCase__ = is_training
UpperCamelCase__ = use_attention_mask
UpperCamelCase__ = use_token_type_ids
UpperCamelCase__ = use_labels
UpperCamelCase__ = vocab_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = type_vocab_size
UpperCamelCase__ = type_sequence_label_size
UpperCamelCase__ = initializer_range
UpperCamelCase__ = num_choices
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ = None
if self.use_attention_mask:
UpperCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ = None
if self.use_token_type_ids:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase__ = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
UpperCamelCase__ = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = config_and_inputs
UpperCamelCase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
UpperCamelCase__ = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = config_and_inputs
UpperCamelCase__ = True
UpperCamelCase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class __lowerCamelCase ( _a , unittest.TestCase ):
a : Tuple =True
a : Tuple =(
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ = FlaxRobertaModelTester(self )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
for model_class_name in self.all_model_classes:
UpperCamelCase__ = model_class_name.from_pretrained('roberta-base' , from_pt=snake_case_ )
UpperCamelCase__ = model(np.ones((1, 1) ) )
self.assertIsNotNone(snake_case_ )
| 20 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : List[str]= logging.get_logger(__name__)
class __lowerCamelCase ( _a ):
a : Optional[int] ="""timm_backbone"""
def __init__( self , snake_case_=None , snake_case_=3 , snake_case_=True , snake_case_=True , snake_case_=None , **snake_case_ , ) -> Dict:
super().__init__(**snake_case_ )
UpperCamelCase__ = backbone
UpperCamelCase__ = num_channels
UpperCamelCase__ = features_only
UpperCamelCase__ = use_pretrained_backbone
UpperCamelCase__ = True
UpperCamelCase__ = out_indices if out_indices is not None else (-1,)
| 20 | 1 |
"""simple docstring"""
from __future__ import annotations
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
for i in range(1 , len(matrix[0] ) ):
matrix[0][i] += matrix[0][i - 1]
# preprocessing the first column
for i in range(1 , len(SCREAMING_SNAKE_CASE ) ):
matrix[i][0] += matrix[i - 1][0]
# updating the path cost for current position
for i in range(1 , len(SCREAMING_SNAKE_CASE ) ):
for j in range(1 , len(matrix[0] ) ):
matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 |
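The in-place dynamic program above computes the cheapest top-left to bottom-right path moving only right or down: after the two border passes, each cell holds the minimum cost of reaching it. A restatement with a worked example (note that it mutates its argument):

def min_path_sum(matrix: list[list[int]]) -> int:
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]         # first row: only 'right' moves
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]         # first column: only 'down' moves
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]

print(min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]))   # 7, via 1 -> 3 -> 1 -> 1 -> 1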
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
A__ : Any= logging.get_logger(__name__)
A__ : str= {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class __lowerCamelCase ( _a ):
a : List[str] ="""layoutlmv3"""
def __init__( self , snake_case_=5_0265 , snake_case_=768 , snake_case_=12 , snake_case_=12 , snake_case_=3072 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=2 , snake_case_=0.02 , snake_case_=1E-5 , snake_case_=1 , snake_case_=0 , snake_case_=2 , snake_case_=1024 , snake_case_=128 , snake_case_=128 , snake_case_=True , snake_case_=32 , snake_case_=128 , snake_case_=64 , snake_case_=256 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=224 , snake_case_=3 , snake_case_=16 , snake_case_=None , **snake_case_ , ) -> Union[str, Any]:
super().__init__(
vocab_size=snake_case_ , hidden_size=snake_case_ , num_hidden_layers=snake_case_ , num_attention_heads=snake_case_ , intermediate_size=snake_case_ , hidden_act=snake_case_ , hidden_dropout_prob=snake_case_ , attention_probs_dropout_prob=snake_case_ , max_position_embeddings=snake_case_ , type_vocab_size=snake_case_ , initializer_range=snake_case_ , layer_norm_eps=snake_case_ , pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_ , )
UpperCamelCase__ = max_ad_position_embeddings
UpperCamelCase__ = coordinate_size
UpperCamelCase__ = shape_size
UpperCamelCase__ = has_relative_attention_bias
UpperCamelCase__ = rel_pos_bins
UpperCamelCase__ = max_rel_pos
UpperCamelCase__ = has_spatial_attention_bias
UpperCamelCase__ = rel_ad_pos_bins
UpperCamelCase__ = max_rel_ad_pos
UpperCamelCase__ = text_embed
UpperCamelCase__ = visual_embed
UpperCamelCase__ = input_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = patch_size
UpperCamelCase__ = classifier_dropout
class __lowerCamelCase ( _a ):
a : Tuple =version.parse("""1.12""" )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Mapping[str, Mapping[int, str]]:
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
else:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels'}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> float:
return 1E-5
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
return 12
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = -1 , snake_case_ = -1 , snake_case_ = False , snake_case_ = None , snake_case_ = 3 , snake_case_ = 40 , snake_case_ = 40 , ) -> Mapping[str, Any]:
setattr(processor.image_processor , 'apply_ocr' , snake_case_ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCamelCase__ = compute_effective_axis_dimension(
snake_case_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCamelCase__ = processor.tokenizer.num_special_tokens_to_add(snake_case_ )
UpperCamelCase__ = compute_effective_axis_dimension(
snake_case_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=snake_case_ )
# Generate dummy inputs according to compute batch and sequence
UpperCamelCase__ = [[' '.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
UpperCamelCase__ = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
UpperCamelCase__ = self._generate_dummy_images(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
UpperCamelCase__ = dict(
processor(
snake_case_ , text=snake_case_ , boxes=snake_case_ , return_tensors=snake_case_ , ) )
return inputs
| 20 | 1 |
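A hedged sketch of how an OnnxConfig like the one above is consumed by the legacy transformers.onnx export path; the checkpoint name is real, but treat the exact import locations and call signature as assumptions about the installed version:

from pathlib import Path
from transformers import AutoModel, AutoProcessor
from transformers.models.layoutlmv3 import LayoutLMv3OnnxConfig
from transformers.onnx import export

processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
model = AutoModel.from_pretrained("microsoft/layoutlmv3-base")
onnx_config = LayoutLMv3OnnxConfig(model.config)
export(processor, model, onnx_config, onnx_config.default_onnx_opset, Path("model.onnx"))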
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
A__ : Dict= get_logger(__name__)
A__ : Any= Path(__file__).parent / """model_card_template.md"""
A__ : int= uuida().hex
A__ : List[str]= os.getenv("""HF_HUB_OFFLINE""", """""").upper() in ENV_VARS_TRUE_VALUES
A__ : List[Any]= os.getenv("""DISABLE_TELEMETRY""", """""").upper() in ENV_VARS_TRUE_VALUES
A__ : Tuple= HUGGINGFACE_CO_RESOLVE_ENDPOINT + """/api/telemetry/"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE = None ) -> str:
"""simple docstring"""
UpperCamelCase__ = F'diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}'
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += F'; torch/{_torch_version}'
if is_flax_available():
ua += F'; jax/{_jax_version}'
ua += F'; flax/{_flax_version}'
if is_onnx_available():
ua += F'; onnxruntime/{_onnxruntime_version}'
# CI will set this value to True
if os.environ.get('DIFFUSERS_IS_CI' , '' ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
ua += "; " + "; ".join(F'{k}/{v}' for k, v in user_agent.items() )
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
ua += "; " + user_agent
return ua
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None ) -> Union[str, Any]:
"""simple docstring"""
if token is None:
UpperCamelCase__ = HfFolder.get_token()
if organization is None:
UpperCamelCase__ = whoami(SCREAMING_SNAKE_CASE )['name']
return F'{username}/{model_id}'
else:
return F'{organization}/{model_id}'
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
if not is_jinja_available():
raise ValueError(
'Modelcard rendering is based on Jinja templates.'
' Please make sure to have `jinja` installed before using `create_model_card`.'
' To install it, please run `pip install Jinja2`.' )
if hasattr(SCREAMING_SNAKE_CASE , 'local_rank' ) and args.local_rank not in [-1, 0]:
return
UpperCamelCase__ = args.hub_token if hasattr(SCREAMING_SNAKE_CASE , 'hub_token' ) else None
UpperCamelCase__ = get_full_repo_name(SCREAMING_SNAKE_CASE , token=SCREAMING_SNAKE_CASE )
UpperCamelCase__ = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language='en' , license='apache-2.0' , library_name='diffusers' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=SCREAMING_SNAKE_CASE , model_name=SCREAMING_SNAKE_CASE , repo_name=SCREAMING_SNAKE_CASE , dataset_name=args.dataset_name if hasattr(SCREAMING_SNAKE_CASE , 'dataset_name' ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(SCREAMING_SNAKE_CASE , 'gradient_accumulation_steps' ) else None
        ) , adam_beta1=args.adam_beta1 if hasattr(SCREAMING_SNAKE_CASE , 'adam_beta1' ) else None , adam_beta2=args.adam_beta2 if hasattr(SCREAMING_SNAKE_CASE , 'adam_beta2' ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(SCREAMING_SNAKE_CASE , 'adam_weight_decay' ) else None , adam_epsilon=args.adam_epsilon if hasattr(SCREAMING_SNAKE_CASE , 'adam_epsilon' ) else None , lr_scheduler=args.lr_scheduler if hasattr(SCREAMING_SNAKE_CASE , 'lr_scheduler' ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(SCREAMING_SNAKE_CASE , 'lr_warmup_steps' ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(SCREAMING_SNAKE_CASE , 'ema_inv_gamma' ) else None , ema_power=args.ema_power if hasattr(SCREAMING_SNAKE_CASE , 'ema_power' ) else None , ema_max_decay=args.ema_max_decay if hasattr(SCREAMING_SNAKE_CASE , 'ema_max_decay' ) else None , mixed_precision=args.mixed_precision , )
UpperCamelCase__ = os.path.join(args.output_dir , 'README.md' )
model_card.save(SCREAMING_SNAKE_CASE )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> str:
"""simple docstring"""
if resolved_file is None or commit_hash is not None:
return commit_hash
UpperCamelCase__ = str(Path(SCREAMING_SNAKE_CASE ).as_posix() )
UpperCamelCase__ = re.search(r'snapshots/([^/]+)/' , SCREAMING_SNAKE_CASE )
if search is None:
return None
UpperCamelCase__ = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(SCREAMING_SNAKE_CASE ) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
A__ : List[str]= os.path.expanduser(
os.getenv("""HF_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """huggingface"""))
)
A__ : Optional[Any]= os.path.join(hf_cache_home, """diffusers""")
def lowerCAmelCase_( SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None ) -> None:
"""simple docstring"""
if new_cache_dir is None:
UpperCamelCase__ = DIFFUSERS_CACHE
if old_cache_dir is None:
UpperCamelCase__ = old_diffusers_cache
UpperCamelCase__ = Path(SCREAMING_SNAKE_CASE ).expanduser()
UpperCamelCase__ = Path(SCREAMING_SNAKE_CASE ).expanduser()
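    # Move every blob from the old cache into the new one, preserving the same relative layout.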
for old_blob_path in old_cache_dir.glob('**/blobs/*' ):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
UpperCamelCase__ = new_cache_dir / old_blob_path.relative_to(SCREAMING_SNAKE_CASE )
new_blob_path.parent.mkdir(parents=SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
os.replace(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
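            # Leave a symlink at the old location so an older diffusers version can still find the moved file.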
try:
os.symlink(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
except OSError:
logger.warning(
'Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.' )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
A__ : str= os.path.join(DIFFUSERS_CACHE, """version_diffusers_cache.txt""")
if not os.path.isfile(cache_version_file):
A__ : Any= 0
else:
with open(cache_version_file) as f:
try:
A__ : Optional[int]= int(f.read())
except ValueError:
A__ : Optional[Any]= 0
if cache_version < 1:
A__ : Union[str, Any]= os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
"""The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your """
"""existing cached models. This is a one-time operation, you can interrupt it or run it """
"""later by calling `diffusers.utils.hub_utils.move_cache()`."""
)
try:
move_cache()
except Exception as e:
A__ : Optional[Any]= """\n""".join(traceback.format_tb(e.__traceback__))
logger.error(
F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
"""file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole """
"""message and we will do our best to help."""
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, """w""") as f:
f.write("""1""")
except Exception:
logger.warning(
F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
"""the directory exists and can be written to."""
)
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> str:
"""simple docstring"""
if variant is not None:
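        # Splice the variant in before the file extension, e.g. 'model.bin' with variant 'fp16' -> 'model.fp16.bin'.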
UpperCamelCase__ = weights_name.split('.' )
UpperCamelCase__ = splits[:-1] + [variant] + splits[-1:]
UpperCamelCase__ = '.'.join(SCREAMING_SNAKE_CASE )
return weights_name
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , *,
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ = str(SCREAMING_SNAKE_CASE )
if os.path.isfile(SCREAMING_SNAKE_CASE ):
return pretrained_model_name_or_path
elif os.path.isdir(SCREAMING_SNAKE_CASE ):
if os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ):
# Load from a PyTorch checkpoint
UpperCamelCase__ = os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ):
UpperCamelCase__ = os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return model_file
else:
raise EnvironmentError(
F'Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.' )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(SCREAMING_SNAKE_CASE ).base_version ) >= version.parse('0.20.0' )
):
try:
UpperCamelCase__ = hf_hub_download(
SCREAMING_SNAKE_CASE , filename=_add_variant(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , cache_dir=SCREAMING_SNAKE_CASE , force_download=SCREAMING_SNAKE_CASE , proxies=SCREAMING_SNAKE_CASE , resume_download=SCREAMING_SNAKE_CASE , local_files_only=SCREAMING_SNAKE_CASE , use_auth_token=SCREAMING_SNAKE_CASE , user_agent=SCREAMING_SNAKE_CASE , subfolder=SCREAMING_SNAKE_CASE , revision=revision or commit_hash , )
warnings.warn(
F'Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.' , SCREAMING_SNAKE_CASE , )
return model_file
except: # noqa: E722
warnings.warn(
F'You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}\' so that the correct variant file can be added.' , SCREAMING_SNAKE_CASE , )
try:
# 2. Load model file as usual
UpperCamelCase__ = hf_hub_download(
SCREAMING_SNAKE_CASE , filename=SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE , force_download=SCREAMING_SNAKE_CASE , proxies=SCREAMING_SNAKE_CASE , resume_download=SCREAMING_SNAKE_CASE , local_files_only=SCREAMING_SNAKE_CASE , use_auth_token=SCREAMING_SNAKE_CASE , user_agent=SCREAMING_SNAKE_CASE , subfolder=SCREAMING_SNAKE_CASE , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F'{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '
'listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '
'token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '
'login`.' )
except RevisionNotFoundError:
raise EnvironmentError(
F'{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '
'this model name. Check the model page at '
F'\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.' )
except EntryNotFoundError:
raise EnvironmentError(
F'{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.' )
except HTTPError as err:
raise EnvironmentError(
F'There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}' )
except ValueError:
raise EnvironmentError(
F'We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'
F' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'
F' directory containing a file named {weights_name} or'
' \nCheckout your internet connection or see how to run the library in'
' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.' )
except EnvironmentError:
raise EnvironmentError(
F'Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '
'\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '
F'Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '
F'containing a file named {weights_name}' )
| 20 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class __lowerCamelCase :
def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=2 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=3 , snake_case_=4 , snake_case_=None , ) -> Tuple:
UpperCamelCase__ = parent
UpperCamelCase__ = 13
UpperCamelCase__ = 7
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = 99
UpperCamelCase__ = 384
UpperCamelCase__ = 2
UpperCamelCase__ = 4
UpperCamelCase__ = 37
UpperCamelCase__ = 'gelu'
UpperCamelCase__ = 0.1
UpperCamelCase__ = 0.1
UpperCamelCase__ = 512
UpperCamelCase__ = 16
UpperCamelCase__ = 2
UpperCamelCase__ = 0.02
UpperCamelCase__ = 3
UpperCamelCase__ = 4
UpperCamelCase__ = 128
UpperCamelCase__ = 2
UpperCamelCase__ = 9
UpperCamelCase__ = 1
UpperCamelCase__ = None
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ = None
if self.use_input_mask:
UpperCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ = None
if self.use_token_type_ids:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase__ = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=snake_case_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int:
UpperCamelCase__ = TFConvBertModel(config=snake_case_ )
UpperCamelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCamelCase__ = [input_ids, input_mask]
UpperCamelCase__ = model(snake_case_ )
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int:
UpperCamelCase__ = TFConvBertForMaskedLM(config=snake_case_ )
UpperCamelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[str]:
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = TFConvBertForSequenceClassification(config=snake_case_ )
UpperCamelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Tuple:
UpperCamelCase__ = self.num_choices
UpperCamelCase__ = TFConvBertForMultipleChoice(config=snake_case_ )
UpperCamelCase__ = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[str]:
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = TFConvBertForTokenClassification(config=snake_case_ )
UpperCamelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[Any]:
UpperCamelCase__ = TFConvBertForQuestionAnswering(config=snake_case_ )
UpperCamelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
UpperCamelCase__ = self.prepare_config_and_inputs()
        UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = config_and_inputs
UpperCamelCase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class __lowerCamelCase ( _a , _a , unittest.TestCase ):
a : Any =(
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
a : str =(
{
"""feature-extraction""": TFConvBertModel,
"""fill-mask""": TFConvBertForMaskedLM,
"""question-answering""": TFConvBertForQuestionAnswering,
"""text-classification""": TFConvBertForSequenceClassification,
"""token-classification""": TFConvBertForTokenClassification,
"""zero-shot""": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
a : Any =False
a : Dict =False
a : str =False
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = TFConvBertModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=snake_case_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = True
UpperCamelCase__ = True
if hasattr(snake_case_ , 'use_cache' ):
UpperCamelCase__ = True
UpperCamelCase__ = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
UpperCamelCase__ = getattr(self.model_tester , 'key_length' , snake_case_ )
for model_class in self.all_model_classes:
UpperCamelCase__ = self._prepare_for_class(snake_case_ , snake_case_ )
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = len(model(snake_case_ ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case_ , saved_model=snake_case_ )
UpperCamelCase__ = os.path.join(snake_case_ , 'saved_model' , '1' )
UpperCamelCase__ = tf.keras.models.load_model(snake_case_ )
UpperCamelCase__ = model(snake_case_ )
if self.is_encoder_decoder:
UpperCamelCase__ = outputs['encoder_hidden_states']
UpperCamelCase__ = outputs['encoder_attentions']
else:
UpperCamelCase__ = outputs['hidden_states']
UpperCamelCase__ = outputs['attentions']
self.assertEqual(len(snake_case_ ) , snake_case_ )
UpperCamelCase__ = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(snake_case_ ) , snake_case_ )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = True
UpperCamelCase__ = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
UpperCamelCase__ = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
UpperCamelCase__ = getattr(self.model_tester , 'key_length' , snake_case_ )
UpperCamelCase__ = getattr(self.model_tester , 'key_length' , snake_case_ )
def check_decoder_attentions_output(snake_case_ ):
UpperCamelCase__ = len(snake_case_ )
self.assertEqual(out_len % 2 , 0 )
UpperCamelCase__ = outputs.decoder_attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(snake_case_ ):
UpperCamelCase__ = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
UpperCamelCase__ = True
UpperCamelCase__ = False
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
UpperCamelCase__ = len(snake_case_ )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
if self.is_encoder_decoder:
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_decoder_attentions_output(snake_case_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
UpperCamelCase__ = True
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
# Check attention is always last and order is fine
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(snake_case_ ) )
self.assertEqual(model.config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
@require_tf
class __lowerCamelCase ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
UpperCamelCase__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCamelCase__ = model(snake_case_ )[0]
UpperCamelCase__ = [1, 6, 768]
self.assertEqual(output.shape , snake_case_ )
UpperCamelCase__ = tf.constant(
[
[
[-0.03_475_493, -0.4_686_034, -0.30_638_832],
[0.22_637_248, -0.26_988_646, -0.7_423_424],
[0.10_324_868, -0.45_013_508, -0.58_280_784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , snake_case_ , atol=1E-4 )
| 20 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ : Dict= logging.get_logger(__name__)
A__ : List[str]= {
"""kssteven/ibert-roberta-base""": """https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json""",
"""kssteven/ibert-roberta-large""": """https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json""",
"""kssteven/ibert-roberta-large-mnli""": (
"""https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"""
),
}
class __lowerCamelCase ( _a ):
a : Tuple ="""ibert"""
def __init__( self , snake_case_=3_0522 , snake_case_=768 , snake_case_=12 , snake_case_=12 , snake_case_=3072 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=2 , snake_case_=0.02 , snake_case_=1E-12 , snake_case_=1 , snake_case_=0 , snake_case_=2 , snake_case_="absolute" , snake_case_=False , snake_case_="none" , **snake_case_ , ) -> Optional[int]:
super().__init__(pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_ )
UpperCamelCase__ = vocab_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = hidden_act
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = type_vocab_size
UpperCamelCase__ = initializer_range
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = position_embedding_type
UpperCamelCase__ = quant_mode
UpperCamelCase__ = force_dequant
class __lowerCamelCase ( _a ):
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
UpperCamelCase__ = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
UpperCamelCase__ = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 20 |
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def lowerCAmelCase_( SCREAMING_SNAKE_CASE = 1_00_00_00 , SCREAMING_SNAKE_CASE = 10 ) -> int:
"""simple docstring"""
    UpperCamelCase__ = defaultdict(int )
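    # count[t] is the number of distinct laminae (outer square minus a centred square hole) built from exactly t tiles.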
for outer_width in range(3 , (t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
UpperCamelCase__ = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
else:
UpperCamelCase__ = 1
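        # The hole must share parity with the outer square so the surrounding border is a whole number of tiles.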
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(SCREAMING_SNAKE_CASE , outer_width - 1 , 2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 20 | 1 |
"""simple docstring"""
import datasets
A__ : Tuple= """\
@InProceedings{conneau2018xnli,
author = \"Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin\",
title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",
booktitle = \"Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing\",
year = \"2018\",
publisher = \"Association for Computational Linguistics\",
location = \"Brussels, Belgium\",
}
"""
A__ : Any= """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into a 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""
A__ : int= """
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
'accuracy': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric(\"xnli\")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'sts-b' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'sts-b' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> int:
return {"accuracy": simple_accuracy(snake_case_ , snake_case_ )}
| 20 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class __lowerCamelCase ( unittest.TestCase ):
def __init__( self , snake_case_ , snake_case_=100 , snake_case_=13 , snake_case_=30 , snake_case_=2 , snake_case_=3 , snake_case_=True , snake_case_=True , snake_case_=32 , snake_case_=5 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=10 , snake_case_=0.02 , snake_case_=3 , ) -> Optional[int]:
UpperCamelCase__ = parent
UpperCamelCase__ = vocab_size
UpperCamelCase__ = batch_size
UpperCamelCase__ = image_size
UpperCamelCase__ = patch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = is_training
UpperCamelCase__ = use_labels
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = type_sequence_label_size
UpperCamelCase__ = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase__ = (image_size // patch_size) ** 2
UpperCamelCase__ = num_patches + 1
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ = BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case_ , initializer_range=self.initializer_range , )
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> List[Any]:
UpperCamelCase__ = FlaxBeitModel(config=snake_case_ )
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> List[Any]:
UpperCamelCase__ = FlaxBeitForMaskedImageModeling(config=snake_case_ )
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> List[str]:
UpperCamelCase__ = self.type_sequence_label_size
UpperCamelCase__ = FlaxBeitForImageClassification(config=snake_case_ )
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase__ = 1
UpperCamelCase__ = FlaxBeitForImageClassification(snake_case_ )
UpperCamelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase__ = model(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.prepare_config_and_inputs()
        UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = config_and_inputs
UpperCamelCase__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class __lowerCamelCase ( _a , unittest.TestCase ):
a : int =(
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
def SCREAMING_SNAKE_CASE__ ( self ) -> None:
UpperCamelCase__ = FlaxBeitModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ = [*signature.parameters.keys()]
UpperCamelCase__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCamelCase__ = self._prepare_for_class(snake_case_ , snake_case_ )
UpperCamelCase__ = model_class(snake_case_ )
@jax.jit
def model_jitted(snake_case_ , **snake_case_ ):
return model(pixel_values=snake_case_ , **snake_case_ )
with self.subTest('JIT Enabled' ):
UpperCamelCase__ = model_jitted(**snake_case_ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
UpperCamelCase__ = model_jitted(**snake_case_ ).to_tuple()
self.assertEqual(len(snake_case_ ) , len(snake_case_ ) )
for jitted_output, output in zip(snake_case_ , snake_case_ ):
self.assertEqual(jitted_output.shape , output.shape )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
for model_class_name in self.all_model_classes:
UpperCamelCase__ = model_class_name.from_pretrained('microsoft/beit-base-patch16-224' )
UpperCamelCase__ = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(snake_case_ )
def lowerCAmelCase_( ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = FlaxBeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=snake_case_ , return_tensors='np' ).pixel_values
# prepare bool_masked_pos
UpperCamelCase__ = np.ones((1, 196) , dtype=snake_case_ )
# forward pass
UpperCamelCase__ = model(pixel_values=snake_case_ , bool_masked_pos=snake_case_ )
UpperCamelCase__ = outputs.logits
# verify the logits
UpperCamelCase__ = (1, 196, 8192)
self.assertEqual(logits.shape , snake_case_ )
UpperCamelCase__ = np.array(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , snake_case_ , atol=1E-2 ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=snake_case_ , return_tensors='np' )
# forward pass
UpperCamelCase__ = model(**snake_case_ )
UpperCamelCase__ = outputs.logits
# verify the logits
UpperCamelCase__ = (1, 1000)
self.assertEqual(logits.shape , snake_case_ )
UpperCamelCase__ = np.array([-1.2_385, -1.0_987, -1.0_108] )
self.assertTrue(np.allclose(logits[0, :3] , snake_case_ , atol=1E-4 ) )
UpperCamelCase__ = 281
self.assertEqual(logits.argmax(-1 ).item() , snake_case_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=snake_case_ , return_tensors='np' )
# forward pass
UpperCamelCase__ = model(**snake_case_ )
UpperCamelCase__ = outputs.logits
# verify the logits
UpperCamelCase__ = (1, 2_1841)
self.assertEqual(logits.shape , snake_case_ )
UpperCamelCase__ = np.array([1.6_881, -0.2_787, 0.5_901] )
self.assertTrue(np.allclose(logits[0, :3] , snake_case_ , atol=1E-4 ) )
UpperCamelCase__ = 2396
self.assertEqual(logits.argmax(-1 ).item() , snake_case_ )
| 20 | 1 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ , UpperCamelCase__ = FlaxStableDiffusionPipeline.from_pretrained(
'stabilityai/stable-diffusion-2' , revision='bf16' , dtype=jnp.bfloataa , )
UpperCamelCase__ = 'A painting of a squirrel eating a burger'
UpperCamelCase__ = jax.device_count()
UpperCamelCase__ = num_samples * [prompt]
UpperCamelCase__ = sd_pipe.prepare_inputs(snake_case_ )
UpperCamelCase__ = replicate(snake_case_ )
UpperCamelCase__ = shard(snake_case_ )
UpperCamelCase__ = jax.random.PRNGKey(0 )
UpperCamelCase__ = jax.random.split(snake_case_ , jax.device_count() )
UpperCamelCase__ = sd_pipe(snake_case_ , snake_case_ , snake_case_ , num_inference_steps=25 , jit=snake_case_ )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
UpperCamelCase__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
UpperCamelCase__ = images[0, 253:256, 253:256, -1]
UpperCamelCase__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
UpperCamelCase__ = jnp.array([0.4_238, 0.4_414, 0.4_395, 0.4_453, 0.4_629, 0.4_590, 0.4_531, 0.45_508, 0.4_512] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ = 'stabilityai/stable-diffusion-2'
UpperCamelCase__ , UpperCamelCase__ = FlaxDPMSolverMultistepScheduler.from_pretrained(snake_case_ , subfolder='scheduler' )
UpperCamelCase__ , UpperCamelCase__ = FlaxStableDiffusionPipeline.from_pretrained(
snake_case_ , scheduler=snake_case_ , revision='bf16' , dtype=jnp.bfloataa , )
UpperCamelCase__ = scheduler_params
UpperCamelCase__ = 'A painting of a squirrel eating a burger'
UpperCamelCase__ = jax.device_count()
UpperCamelCase__ = num_samples * [prompt]
UpperCamelCase__ = sd_pipe.prepare_inputs(snake_case_ )
UpperCamelCase__ = replicate(snake_case_ )
UpperCamelCase__ = shard(snake_case_ )
UpperCamelCase__ = jax.random.PRNGKey(0 )
UpperCamelCase__ = jax.random.split(snake_case_ , jax.device_count() )
UpperCamelCase__ = sd_pipe(snake_case_ , snake_case_ , snake_case_ , num_inference_steps=25 , jit=snake_case_ )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
UpperCamelCase__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
UpperCamelCase__ = images[0, 253:256, 253:256, -1]
UpperCamelCase__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
UpperCamelCase__ = jnp.array([0.4_336, 0.42_969, 0.4_453, 0.4_199, 0.4_297, 0.4_531, 0.4_434, 0.4_434, 0.4_297] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 20 |
"""simple docstring"""
import sys
from collections import defaultdict
class __lowerCamelCase :
def __init__( self ) -> Tuple:
UpperCamelCase__ = []
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> List[str]:
return self.node_position[vertex]
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> Optional[int]:
UpperCamelCase__ = pos
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[Any]:
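        # Sift the element at index 'start' down the min-heap until it is no larger than its children.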
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
UpperCamelCase__ = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
UpperCamelCase__ = 2 * start + 1
else:
UpperCamelCase__ = 2 * start + 2
if heap[smallest_child] < heap[start]:
UpperCamelCase__ , UpperCamelCase__ = heap[smallest_child], positions[smallest_child]
UpperCamelCase__ , UpperCamelCase__ = (
heap[start],
positions[start],
)
UpperCamelCase__ , UpperCamelCase__ = temp, tempa
UpperCamelCase__ = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , snake_case_ )
self.top_to_bottom(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Any:
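        # Bubble the updated value up toward the root while it is smaller than its parent.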
UpperCamelCase__ = position[index]
while index != 0:
UpperCamelCase__ = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
UpperCamelCase__ = heap[parent]
UpperCamelCase__ = position[parent]
self.set_position(position[parent] , snake_case_ )
else:
UpperCamelCase__ = val
UpperCamelCase__ = temp
self.set_position(snake_case_ , snake_case_ )
break
UpperCamelCase__ = parent
else:
UpperCamelCase__ = val
UpperCamelCase__ = temp
self.set_position(snake_case_ , 0 )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> Any:
UpperCamelCase__ = len(snake_case_ ) // 2 - 1
for i in range(snake_case_ , -1 , -1 ):
self.top_to_bottom(snake_case_ , snake_case_ , len(snake_case_ ) , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> List[Any]:
UpperCamelCase__ = positions[0]
UpperCamelCase__ = sys.maxsize
self.top_to_bottom(snake_case_ , 0 , len(snake_case_ ) , snake_case_ )
return temp
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ = Heap()
UpperCamelCase__ = [0] * len(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = [-1] * len(SCREAMING_SNAKE_CASE ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
UpperCamelCase__ = [] # Heap of Distance of vertices from their neighboring vertex
UpperCamelCase__ = []
for vertex in range(len(SCREAMING_SNAKE_CASE ) ):
distance_tv.append(sys.maxsize )
positions.append(SCREAMING_SNAKE_CASE )
heap.node_position.append(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = []
UpperCamelCase__ = 1
UpperCamelCase__ = sys.maxsize
for neighbor, distance in adjacency_list[0]:
UpperCamelCase__ = 0
UpperCamelCase__ = distance
heap.heapify(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
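    # Prim's algorithm: repeatedly pull the closest unvisited vertex off the heap and relax its neighbours.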
for _ in range(1 , len(SCREAMING_SNAKE_CASE ) ):
UpperCamelCase__ = heap.delete_minimum(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
UpperCamelCase__ = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(SCREAMING_SNAKE_CASE )]
):
UpperCamelCase__ = distance
heap.bottom_to_top(
SCREAMING_SNAKE_CASE , heap.get_position(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase__ = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
A__ : Dict= int(input("""Enter number of edges: """).strip())
A__ : Dict= defaultdict(list)
for _ in range(edges_number):
A__ : Dict= [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 20 | 1 |
"""simple docstring"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE = 1_00_00_00 ) -> int:
"""simple docstring"""
UpperCamelCase__ = [i - 1 for i in range(limit + 1 )]
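    # Sieve Euler's totient: phi[i] starts at i - 1, and any i still holding i - 1 when reached must be prime.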
for i in range(2 , limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i , limit + 1 , SCREAMING_SNAKE_CASE ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
| 20 |
"""simple docstring"""
from copy import deepcopy
class __lowerCamelCase :
def __init__( self , snake_case_ = None , snake_case_ = None ) -> None:
if arr is None and size is not None:
UpperCamelCase__ = size
UpperCamelCase__ = [0] * size
elif arr is not None:
self.init(snake_case_ )
else:
raise ValueError('Either arr or size must be specified' )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> None:
UpperCamelCase__ = len(snake_case_ )
UpperCamelCase__ = deepcopy(snake_case_ )
for i in range(1 , self.size ):
UpperCamelCase__ = self.next_(snake_case_ )
if j < self.size:
self.tree[j] += self.tree[i]
def SCREAMING_SNAKE_CASE__ ( self ) -> list[int]:
UpperCamelCase__ = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
UpperCamelCase__ = self.next_(snake_case_ )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> int:
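        # Add the lowest set bit: jumps to the next tree node whose range covers this index.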
return index + (index & (-index))
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> int:
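        # Clear the lowest set bit: moves to the parent interval used during prefix queries.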
return index - (index & (-index))
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> None:
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
UpperCamelCase__ = self.next_(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> None:
self.add(snake_case_ , value - self.get(snake_case_ ) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> int:
if right == 0:
return 0
UpperCamelCase__ = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
UpperCamelCase__ = self.prev(snake_case_ )
return result
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> int:
return self.prefix(snake_case_ ) - self.prefix(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> int:
return self.query(snake_case_ , index + 1 )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> int:
value -= self.tree[0]
if value < 0:
return -1
UpperCamelCase__ = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
UpperCamelCase__ = 0
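        # Binary lifting: greedily take the largest jump whose cumulative sum still fits inside the target value.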
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
doctest.testmod()
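# Hypothetical usage sketch (the class name is assumed; the class above is a Fenwick / binary indexed tree):
#   tree = FenwickTree(arr=[1, 2, 3, 4])
#   tree.add(0, 5)        # point update: the underlying array is now [6, 2, 3, 4]
#   tree.prefix(3)        # sum of the first three elements -> 11
#   tree.query(1, 3)      # sum of elements at indices 1..2 -> 5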
| 20 | 1 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
A__ : Any= logging.getLogger(__name__)
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
return (preds == labels).mean()
@dataclass
class __lowerCamelCase :
a : str =field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
a : Optional[str] =field(
default=_a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
a : Optional[str] =field(
default=_a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
a : Optional[str] =field(
default=_a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class __lowerCamelCase :
a : str =field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} )
a : str =field(metadata={"""help""": """Should contain the data files for the task."""} )
a : int =field(
default=1_2_8 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
a : bool =field(
default=_a , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def lowerCAmelCase_( ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , SCREAMING_SNAKE_CASE )
# Set seed
set_seed(training_args.seed )
try:
UpperCamelCase__ = processors[data_args.task_name]()
UpperCamelCase__ = processor.get_labels()
UpperCamelCase__ = len(SCREAMING_SNAKE_CASE )
except KeyError:
raise ValueError('Task not found: %s' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase__ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=SCREAMING_SNAKE_CASE , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
UpperCamelCase__ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
UpperCamelCase__ = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , )
# Get datasets
UpperCamelCase__ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=SCREAMING_SNAKE_CASE , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
UpperCamelCase__ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=SCREAMING_SNAKE_CASE , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(SCREAMING_SNAKE_CASE ) -> Dict:
UpperCamelCase__ = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(SCREAMING_SNAKE_CASE , p.label_ids )}
# Data collator
UpperCamelCase__ = DataCollatorWithPadding(SCREAMING_SNAKE_CASE , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
UpperCamelCase__ = Trainer(
model=SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , train_dataset=SCREAMING_SNAKE_CASE , eval_dataset=SCREAMING_SNAKE_CASE , compute_metrics=SCREAMING_SNAKE_CASE , data_collator=SCREAMING_SNAKE_CASE , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
UpperCamelCase__ = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
UpperCamelCase__ = trainer.evaluate()
UpperCamelCase__ = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_master():
with open(SCREAMING_SNAKE_CASE , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
writer.write('%s = %s\n' % (key, value) )
results.update(SCREAMING_SNAKE_CASE )
return results
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 20 |
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
A__ : Union[str, Any]= logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""" )
@require_torch
@require_tf
@slow
class __lowerCamelCase ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = True , ) -> Tuple:
UpperCamelCase__ = [file for file in os.listdir(snake_case_ ) if os.path.isfile(os.path.join(snake_case_ , snake_case_ ) )]
if identifier is not None:
UpperCamelCase__ = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(snake_case_ , snake_case_ ):
for n_ in n_identifier:
UpperCamelCase__ = [file for file in files if n_ not in file]
else:
UpperCamelCase__ = [file for file in files if n_identifier not in file]
UpperCamelCase__ = ignore_files or []
ignore_files.append('__init__.py' )
UpperCamelCase__ = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('Testing' , snake_case_ )
if only_modules:
UpperCamelCase__ = file.split('.' )[0]
try:
UpperCamelCase__ = getattr(snake_case_ , snake_case_ )
UpperCamelCase__ = doctest.DocTestSuite(snake_case_ )
UpperCamelCase__ = unittest.TextTestRunner().run(snake_case_ )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(F'{module_identifier} is not a module.' )
else:
UpperCamelCase__ = doctest.testfile(str('..' / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = Path('src/transformers' )
UpperCamelCase__ = 'modeling'
UpperCamelCase__ = [
'modeling_ctrl.py',
'modeling_tf_ctrl.py',
]
self.analyze_directory(snake_case_ , identifier=snake_case_ , ignore_files=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = Path('src/transformers' )
UpperCamelCase__ = 'tokenization'
self.analyze_directory(snake_case_ , identifier=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
UpperCamelCase__ = Path('src/transformers' )
UpperCamelCase__ = 'configuration'
self.analyze_directory(snake_case_ , identifier=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = Path('src/transformers' )
UpperCamelCase__ = ['configuration', 'modeling', 'tokenization']
self.analyze_directory(snake_case_ , n_identifier=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = Path('docs/source' )
UpperCamelCase__ = ['favicon.ico']
self.analyze_directory(snake_case_ , ignore_files=snake_case_ , only_modules=snake_case_ )
| 20 | 1 |
"""simple docstring"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
assert x is not None
assert y is not None
UpperCamelCase__ = len(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = len(SCREAMING_SNAKE_CASE )
# declaring the array for storing the dp values
UpperCamelCase__ = [[0] * (n + 1) for _ in range(m + 1 )] # noqa: E741
for i in range(1 , m + 1 ):
for j in range(1 , n + 1 ):
UpperCamelCase__ = 1 if x[i - 1] == y[j - 1] else 0
UpperCamelCase__ = max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match )
UpperCamelCase__ = ''
UpperCamelCase__ , UpperCamelCase__ = m, n
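    # Trace back from (m, n): a diagonal move taken on a match emits one character of the subsequence.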
while i > 0 and j > 0:
UpperCamelCase__ = 1 if x[i - 1] == y[j - 1] else 0
if l[i][j] == l[i - 1][j - 1] + match:
if match == 1:
UpperCamelCase__ = x[i - 1] + seq
i -= 1
j -= 1
elif l[i][j] == l[i - 1][j]:
i -= 1
else:
j -= 1
return l[m][n], seq
if __name__ == "__main__":
A__ : Optional[Any]= """AGGTAB"""
A__ : Dict= """GXTXAYB"""
A__ : Any= 4
A__ : Optional[int]= """GTAB"""
A__, A__ : List[str]= longest_common_subsequence(a, b)
print("""len =""", ln, """, sub-sequence =""", subseq)
import doctest
doctest.testmod()
| 20 |
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ : str= logging.get_logger(__name__)
A__ : List[Any]= {
"""nvidia/segformer-b0-finetuned-ade-512-512""": (
"""https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"""
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class __lowerCamelCase ( _a ):
a : Any ="""segformer"""
def __init__( self , snake_case_=3 , snake_case_=4 , snake_case_=[2, 2, 2, 2] , snake_case_=[8, 4, 2, 1] , snake_case_=[32, 64, 160, 256] , snake_case_=[7, 3, 3, 3] , snake_case_=[4, 2, 2, 2] , snake_case_=[1, 2, 5, 8] , snake_case_=[4, 4, 4, 4] , snake_case_="gelu" , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.1 , snake_case_=0.02 , snake_case_=0.1 , snake_case_=1E-6 , snake_case_=256 , snake_case_=255 , **snake_case_ , ) -> Tuple:
super().__init__(**snake_case_ )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
' removed, as the behaviour will default to that of reshape_last_stage = True.' , snake_case_ , )
UpperCamelCase__ = num_channels
UpperCamelCase__ = num_encoder_blocks
UpperCamelCase__ = depths
UpperCamelCase__ = sr_ratios
UpperCamelCase__ = hidden_sizes
UpperCamelCase__ = patch_sizes
UpperCamelCase__ = strides
UpperCamelCase__ = mlp_ratios
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = classifier_dropout_prob
UpperCamelCase__ = initializer_range
UpperCamelCase__ = drop_path_rate
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = decoder_hidden_size
UpperCamelCase__ = kwargs.get('reshape_last_stage' , snake_case_ )
UpperCamelCase__ = semantic_loss_ignore_index
class __lowerCamelCase ( _a ):
a : Any =version.parse("""1.11""" )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> float:
return 1E-4
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
return 12
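# Hypothetical usage sketch (requires the `transformers` package): the defaults
# above reproduce the SegFormer-B0 layout of 4 encoder blocks with hidden sizes
# 32/64/160/256.
from transformers import SegformerConfig
segformer_config = SegformerConfig()
assert segformer_config.num_encoder_blocks == 4
assert segformer_config.hidden_sizes == [32, 64, 160, 256]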
| 20 | 1 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
with open(SCREAMING_SNAKE_CASE ) as metadata_file:
UpperCamelCase__ = json.load(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = LukeConfig(use_entity_aware_attention=SCREAMING_SNAKE_CASE , **metadata['model_config'] )
# Load in the weights from the checkpoint_path
UpperCamelCase__ = torch.load(SCREAMING_SNAKE_CASE , map_location='cpu' )
# Load the entity vocab file
UpperCamelCase__ = load_entity_vocab(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = RobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] )
# Add special tokens to the token vocabulary for downstream tasks
UpperCamelCase__ = AddedToken('<ent>' , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE )
UpperCamelCase__ = AddedToken('<ent2>' , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE )
tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE )
with open(os.path.join(SCREAMING_SNAKE_CASE , LukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase__ = LukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE )
# Initialize the embeddings of the special tokens
UpperCamelCase__ = state_dict['embeddings.word_embeddings.weight']
UpperCamelCase__ = word_emb[tokenizer.convert_tokens_to_ids(['@'] )[0]].unsqueeze(0 )
UpperCamelCase__ = word_emb[tokenizer.convert_tokens_to_ids(['#'] )[0]].unsqueeze(0 )
UpperCamelCase__ = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
UpperCamelCase__ = F'encoder.layer.{layer_index}.attention.self.'
UpperCamelCase__ = state_dict[prefix + matrix_name]
UpperCamelCase__ = state_dict[prefix + matrix_name]
UpperCamelCase__ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
UpperCamelCase__ = state_dict['entity_embeddings.entity_embeddings.weight']
UpperCamelCase__ = entity_emb[entity_vocab['[MASK]']]
UpperCamelCase__ = LukeModel(config=SCREAMING_SNAKE_CASE ).eval()
UpperCamelCase__ , UpperCamelCase__ = model.load_state_dict(SCREAMING_SNAKE_CASE , strict=SCREAMING_SNAKE_CASE )
    if not (len(missing_keys ) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(F'Missing keys {", ".join(missing_keys )}. Expected only missing embeddings.position_ids' )
if not (all(key.startswith('entity_predictions' ) or key.startswith('lm_head' ) for key in unexpected_keys )):
raise ValueError(
'Unexpected keys'
F' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}' )
# Check outputs
UpperCamelCase__ = LukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE , task='entity_classification' )
UpperCamelCase__ = (
'Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'
' new world number one avoid a humiliating second- round exit at Wimbledon .'
)
UpperCamelCase__ = (39, 42)
UpperCamelCase__ = tokenizer(SCREAMING_SNAKE_CASE , entity_spans=[span] , add_prefix_space=SCREAMING_SNAKE_CASE , return_tensors='pt' )
UpperCamelCase__ = model(**SCREAMING_SNAKE_CASE )
# Verify word hidden states
if model_size == "large":
UpperCamelCase__ = torch.Size((1, 42, 10_24) )
UpperCamelCase__ = torch.tensor(
[[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
else: # base
UpperCamelCase__ = torch.Size((1, 42, 7_68) )
UpperCamelCase__ = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
UpperCamelCase__ = torch.Size((1, 1, 10_24) )
UpperCamelCase__ = torch.tensor([[0.0466, -0.0106, -0.0179]] )
else: # base
UpperCamelCase__ = torch.Size((1, 1, 7_68) )
UpperCamelCase__ = torch.tensor([[0.1457, 0.1044, 0.0174]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
F' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print('Saving PyTorch model to {}'.format(SCREAMING_SNAKE_CASE ) )
model.save_pretrained(SCREAMING_SNAKE_CASE )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ = {}
with open(SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' ) as f:
for index, line in enumerate(SCREAMING_SNAKE_CASE ):
UpperCamelCase__ , UpperCamelCase__ = line.rstrip().split('\t' )
UpperCamelCase__ = index
return entity_vocab
if __name__ == "__main__":
A__ : Optional[int]= argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
A__ : Optional[int]= parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
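# Assumed invocation (flag names match the parser above; the script name and
# file paths are placeholders):
#
#   python convert_luke_checkpoint.py \
#       --checkpoint_path luke.bin \
#       --metadata_path metadata.json \
#       --entity_vocab_path entity_vocab.tsv \
#       --pytorch_dump_folder_path ./luke-hf \
#       --model_size base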
| 20 |
"""simple docstring"""
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class __lowerCamelCase ( _a ):
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Union[str, Any]:
UpperCamelCase__ = parser.add_parser('download' )
download_parser.add_argument(
'--cache-dir' , type=snake_case_ , default=snake_case_ , help='Path to location to store the models' )
download_parser.add_argument(
            '--force' , action='store_true' , help='Force the model to be downloaded even if already in cache-dir' )
download_parser.add_argument(
'--trust-remote-code' , action='store_true' , help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' , )
download_parser.add_argument('model' , type=snake_case_ , help='Name of the model to download' )
download_parser.set_defaults(func=snake_case_ )
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> str:
UpperCamelCase__ = model
UpperCamelCase__ = cache
UpperCamelCase__ = force
UpperCamelCase__ = trust_remote_code
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
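# Assumed usage sketch: if this command is registered under the transformers-cli
# entry point, the shell call
#   transformers-cli download --cache-dir /tmp/models bert-base-uncased
# is equivalent to the two Python calls below.
from transformers import AutoModel, AutoTokenizer
AutoModel.from_pretrained('bert-base-uncased', cache_dir='/tmp/models')
AutoTokenizer.from_pretrained('bert-base-uncased', cache_dir='/tmp/models')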
| 20 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class __lowerCamelCase :
def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=[1, 1, 2] , snake_case_=1 , snake_case_=32 , snake_case_=4 , snake_case_=8 , snake_case_=37 , snake_case_="gelu_new" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.0 , snake_case_=512 , snake_case_=3 , snake_case_=0.02 , snake_case_=3 , snake_case_=4 , snake_case_=None , snake_case_=False , ) -> Optional[int]:
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = seq_length
UpperCamelCase__ = is_training
UpperCamelCase__ = use_input_mask
UpperCamelCase__ = use_token_type_ids
UpperCamelCase__ = use_labels
UpperCamelCase__ = vocab_size
UpperCamelCase__ = block_sizes
UpperCamelCase__ = num_decoder_layers
UpperCamelCase__ = d_model
UpperCamelCase__ = n_head
UpperCamelCase__ = d_head
UpperCamelCase__ = d_inner
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout
UpperCamelCase__ = attention_dropout
UpperCamelCase__ = activation_dropout
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = type_vocab_size
UpperCamelCase__ = 2
UpperCamelCase__ = num_labels
UpperCamelCase__ = num_choices
UpperCamelCase__ = scope
UpperCamelCase__ = initializer_std
# Used in the tests to check the size of the first attention layer
UpperCamelCase__ = n_head
# Used in the tests to check the size of the first hidden state
UpperCamelCase__ = self.d_model
# Used in the tests to check the number of output hidden states/attentions
UpperCamelCase__ = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
UpperCamelCase__ = self.num_hidden_layers + 2
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ = None
if self.use_input_mask:
UpperCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ = None
if self.use_token_type_ids:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase__ = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) -> Optional[int]:
UpperCamelCase__ = TFFunnelModel(config=snake_case_ )
UpperCamelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCamelCase__ = model(snake_case_ )
UpperCamelCase__ = [input_ids, input_mask]
UpperCamelCase__ = model(snake_case_ )
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
UpperCamelCase__ = False
UpperCamelCase__ = TFFunnelModel(config=snake_case_ )
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
UpperCamelCase__ = False
UpperCamelCase__ = TFFunnelModel(config=snake_case_ )
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) -> Any:
UpperCamelCase__ = TFFunnelBaseModel(config=snake_case_ )
UpperCamelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCamelCase__ = model(snake_case_ )
UpperCamelCase__ = [input_ids, input_mask]
UpperCamelCase__ = model(snake_case_ )
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
UpperCamelCase__ = False
UpperCamelCase__ = TFFunnelBaseModel(config=snake_case_ )
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
UpperCamelCase__ = False
UpperCamelCase__ = TFFunnelBaseModel(config=snake_case_ )
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) -> Union[str, Any]:
UpperCamelCase__ = TFFunnelForPreTraining(config=snake_case_ )
UpperCamelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) -> Union[str, Any]:
UpperCamelCase__ = TFFunnelForMaskedLM(config=snake_case_ )
UpperCamelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) -> Optional[Any]:
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = TFFunnelForSequenceClassification(config=snake_case_ )
UpperCamelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) -> Optional[int]:
UpperCamelCase__ = self.num_choices
UpperCamelCase__ = TFFunnelForMultipleChoice(config=snake_case_ )
UpperCamelCase__ = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) -> List[Any]:
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = TFFunnelForTokenClassification(config=snake_case_ )
UpperCamelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) -> Union[str, Any]:
UpperCamelCase__ = TFFunnelForQuestionAnswering(config=snake_case_ )
UpperCamelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
UpperCamelCase__ = self.prepare_config_and_inputs()
        UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = config_and_inputs
UpperCamelCase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class __lowerCamelCase ( _a , _a , unittest.TestCase ):
a : str =(
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
a : Tuple =(
{
"""feature-extraction""": (TFFunnelBaseModel, TFFunnelModel),
"""fill-mask""": TFFunnelForMaskedLM,
"""question-answering""": TFFunnelForQuestionAnswering,
"""text-classification""": TFFunnelForSequenceClassification,
"""token-classification""": TFFunnelForTokenClassification,
"""zero-shot""": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
a : str =False
a : Optional[Any] =False
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = TFFunnelModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case_ )
@require_tf
class __lowerCamelCase ( _a , unittest.TestCase ):
a : Optional[Any] =(
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
a : Optional[int] =False
a : Union[str, Any] =False
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = TFFunnelModelTester(self , base=snake_case_ )
UpperCamelCase__ = ConfigTester(self , config_class=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case_ )
| 20 |
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCamelCase ( _a ):
def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=5 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=False , snake_case_=True , snake_case_="None" , snake_case_=3 , snake_case_=4 , snake_case_=None , ) -> str:
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = seq_length
UpperCamelCase__ = is_training
UpperCamelCase__ = use_input_mask
UpperCamelCase__ = use_token_type_ids
UpperCamelCase__ = use_labels
UpperCamelCase__ = vocab_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = type_vocab_size
UpperCamelCase__ = type_sequence_label_size
UpperCamelCase__ = initializer_range
UpperCamelCase__ = num_labels
UpperCamelCase__ = num_choices
UpperCamelCase__ = relative_attention
UpperCamelCase__ = position_biased_input
UpperCamelCase__ = pos_att_type
UpperCamelCase__ = scope
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ = None
if self.use_input_mask:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
UpperCamelCase__ = None
if self.use_token_type_ids:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Any:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> str:
UpperCamelCase__ = DebertaVaModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ )[0]
UpperCamelCase__ = model(snake_case_ , token_type_ids=snake_case_ )[0]
UpperCamelCase__ = model(snake_case_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Optional[Any]:
UpperCamelCase__ = DebertaVaForMaskedLM(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Dict:
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = DebertaVaForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Optional[int]:
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = DebertaVaForTokenClassification(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Tuple:
UpperCamelCase__ = DebertaVaForQuestionAnswering(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int:
UpperCamelCase__ = DebertaVaForMultipleChoice(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = self.prepare_config_and_inputs()
        UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = config_and_inputs
UpperCamelCase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( _a , _a , unittest.TestCase ):
a : Any =(
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
a : Dict =(
{
"""feature-extraction""": DebertaVaModel,
"""fill-mask""": DebertaVaForMaskedLM,
"""question-answering""": DebertaVaForQuestionAnswering,
"""text-classification""": DebertaVaForSequenceClassification,
"""token-classification""": DebertaVaForTokenClassification,
"""zero-shot""": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
a : Tuple =True
a : Union[str, Any] =False
a : Tuple =False
a : Union[str, Any] =False
a : Dict =False
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
UpperCamelCase__ = DebertaVaModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=snake_case_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*snake_case_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ = DebertaVaModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( unittest.TestCase ):
@unittest.skip(reason='Model not available yet' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
pass
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
UpperCamelCase__ = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge' )
UpperCamelCase__ = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
UpperCamelCase__ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ )[0]
# compare the actual values for a slice.
UpperCamelCase__ = torch.tensor(
[[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , snake_case_ , atol=1E-4 ) , F'{output[:, 1:4, 1:4]}' )
| 20 | 1 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> bool:
"""simple docstring"""
UpperCamelCase__ = int(number**0.5 )
return number == sq * sq
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> tuple[int, int]:
"""simple docstring"""
UpperCamelCase__ = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
UpperCamelCase__ = x_den * y_den * z_den
UpperCamelCase__ = gcd(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
top //= hcf
bottom //= hcf
return top, bottom
def lowerCAmelCase_( SCREAMING_SNAKE_CASE = 35 ) -> int:
"""simple docstring"""
UpperCamelCase__ = set()
UpperCamelCase__ = 42
UpperCamelCase__ = Fraction(0 )
UpperCamelCase__ = 42
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
UpperCamelCase__ = x_num * y_den + x_den * y_num
UpperCamelCase__ = x_den * y_den
UpperCamelCase__ = gcd(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase__ = add_three(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
unique_s.add(SCREAMING_SNAKE_CASE )
# n=2
UpperCamelCase__ = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
UpperCamelCase__ = x_den * x_den * y_den * y_den
if is_sq(SCREAMING_SNAKE_CASE ) and is_sq(SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = int(sqrt(SCREAMING_SNAKE_CASE ) )
UpperCamelCase__ = int(sqrt(SCREAMING_SNAKE_CASE ) )
UpperCamelCase__ = gcd(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase__ = add_three(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
unique_s.add(SCREAMING_SNAKE_CASE )
# n=-1
UpperCamelCase__ = x_num * y_num
UpperCamelCase__ = x_den * y_num + x_num * y_den
UpperCamelCase__ = gcd(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase__ = add_three(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
unique_s.add(SCREAMING_SNAKE_CASE )
# n=2
UpperCamelCase__ = x_num * x_num * y_num * y_num
UpperCamelCase__ = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(SCREAMING_SNAKE_CASE ) and is_sq(SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = int(sqrt(SCREAMING_SNAKE_CASE ) )
UpperCamelCase__ = int(sqrt(SCREAMING_SNAKE_CASE ) )
UpperCamelCase__ = gcd(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase__ = add_three(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
unique_s.add(SCREAMING_SNAKE_CASE )
for num, den in unique_s:
total += Fraction(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return total.denominator + total.numerator
if __name__ == "__main__":
print(F"""{solution() = }""")
| 20 |
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ = SwinConfig()
UpperCamelCase__ = swin_name.split('_' )
UpperCamelCase__ = name_split[1]
UpperCamelCase__ = int(name_split[4] )
UpperCamelCase__ = int(name_split[3][-1] )
if model_size == "tiny":
UpperCamelCase__ = 96
UpperCamelCase__ = (2, 2, 6, 2)
UpperCamelCase__ = (3, 6, 12, 24)
elif model_size == "small":
UpperCamelCase__ = 96
UpperCamelCase__ = (2, 2, 18, 2)
UpperCamelCase__ = (3, 6, 12, 24)
elif model_size == "base":
UpperCamelCase__ = 1_28
UpperCamelCase__ = (2, 2, 18, 2)
UpperCamelCase__ = (4, 8, 16, 32)
else:
UpperCamelCase__ = 1_92
UpperCamelCase__ = (2, 2, 18, 2)
UpperCamelCase__ = (6, 12, 24, 48)
if "in22k" in swin_name:
UpperCamelCase__ = 2_18_41
else:
UpperCamelCase__ = 10_00
UpperCamelCase__ = 'huggingface/label-files'
UpperCamelCase__ = 'imagenet-1k-id2label.json'
UpperCamelCase__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
UpperCamelCase__ = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
UpperCamelCase__ = idalabel
UpperCamelCase__ = {v: k for k, v in idalabel.items()}
UpperCamelCase__ = img_size
UpperCamelCase__ = num_classes
UpperCamelCase__ = embed_dim
UpperCamelCase__ = depths
UpperCamelCase__ = num_heads
UpperCamelCase__ = window_size
return config
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
if "patch_embed.proj" in name:
UpperCamelCase__ = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
UpperCamelCase__ = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
UpperCamelCase__ = 'encoder.' + name
if "attn.proj" in name:
UpperCamelCase__ = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
UpperCamelCase__ = name.replace('attn' , 'attention.self' )
if "norm1" in name:
UpperCamelCase__ = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
UpperCamelCase__ = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
UpperCamelCase__ = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
UpperCamelCase__ = name.replace('mlp.fc2' , 'output.dense' )
if name == "norm.weight":
UpperCamelCase__ = 'layernorm.weight'
if name == "norm.bias":
UpperCamelCase__ = 'layernorm.bias'
if "head" in name:
UpperCamelCase__ = name.replace('head' , 'classifier' )
else:
UpperCamelCase__ = 'swin.' + name
return name
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
UpperCamelCase__ = orig_state_dict.pop(SCREAMING_SNAKE_CASE )
if "mask" in key:
continue
elif "qkv" in key:
UpperCamelCase__ = key.split('.' )
UpperCamelCase__ = int(key_split[1] )
UpperCamelCase__ = int(key_split[3] )
UpperCamelCase__ = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
UpperCamelCase__ = val[:dim, :]
UpperCamelCase__ = val[
dim : dim * 2, :
]
UpperCamelCase__ = val[-dim:, :]
else:
UpperCamelCase__ = val[
:dim
]
UpperCamelCase__ = val[
dim : dim * 2
]
UpperCamelCase__ = val[
-dim:
]
else:
UpperCamelCase__ = val
return orig_state_dict
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
UpperCamelCase__ = timm.create_model(SCREAMING_SNAKE_CASE , pretrained=SCREAMING_SNAKE_CASE )
timm_model.eval()
UpperCamelCase__ = get_swin_config(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = SwinForImageClassification(SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase__ = convert_state_dict(timm_model.state_dict() , SCREAMING_SNAKE_CASE )
model.load_state_dict(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
UpperCamelCase__ = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-' ) ) )
UpperCamelCase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
UpperCamelCase__ = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='pt' )
UpperCamelCase__ = timm_model(inputs['pixel_values'] )
UpperCamelCase__ = model(**SCREAMING_SNAKE_CASE ).logits
assert torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-3 )
print(F'Saving model {swin_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
A__ : Optional[Any]= argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
A__ : Tuple= parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
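# Illustrative trace of the key renaming above (a sketch that mirrors the
# replacement order rather than importing the function): a timm attention
# projection weight maps onto the Hugging Face naming scheme.
renamed = 'encoder.' + 'layers.0.blocks.0.attn.proj.weight'  # "layers" in name
renamed = renamed.replace('attn.proj', 'attention.output.dense')
assert renamed == 'encoder.layers.0.blocks.0.attention.output.dense.weight'
assert ('swin.' + renamed).startswith('swin.encoder.')  # non-"head" keys get the "swin." prefix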
| 20 | 1 |
"""simple docstring"""
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]
def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    """simple docstring"""
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)
def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    """simple docstring"""
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)
def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    """simple docstring"""
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)
def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    """simple docstring"""
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
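# Quick check (a sketch): points on the line y = x in the z = 0 plane are
# collinear; moving the third point off that line breaks collinearity.
assert are_collinear((0, 0, 0), (1, 1, 0), (2, 2, 0))
assert not are_collinear((0, 0, 0), (1, 1, 0), (2, 3, 0))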
| 20 |
"""simple docstring"""
def is_pentagonal(n: int) -> bool:
    """simple docstring"""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0
def solution(limit: int = 50_00) -> int:
    """simple docstring"""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
if __name__ == "__main__":
print(F"""{solution() = }""")
| 20 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __lowerCamelCase ( _a , unittest.TestCase ):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
UpperCamelCase__ = ort.SessionOptions()
UpperCamelCase__ = False
return options
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo.png' )
UpperCamelCase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
UpperCamelCase__ = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'runwayml/stable-diffusion-inpainting' , revision='onnx' , safety_checker=snake_case_ , feature_extractor=snake_case_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase__ = 'A red cat sitting on a park bench'
UpperCamelCase__ = np.random.RandomState(0 )
UpperCamelCase__ = pipe(
prompt=snake_case_ , image=snake_case_ , mask_image=snake_case_ , guidance_scale=7.5 , num_inference_steps=10 , generator=snake_case_ , output_type='np' , )
UpperCamelCase__ = output.images
UpperCamelCase__ = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
UpperCamelCase__ = np.array([0.2_514, 0.3_007, 0.3_517, 0.1_790, 0.2_382, 0.3_167, 0.1_944, 0.2_273, 0.2_464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
UpperCamelCase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo.png' )
UpperCamelCase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
UpperCamelCase__ = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-inpainting' , subfolder='scheduler' , revision='onnx' )
UpperCamelCase__ = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'runwayml/stable-diffusion-inpainting' , revision='onnx' , scheduler=snake_case_ , safety_checker=snake_case_ , feature_extractor=snake_case_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase__ = 'A red cat sitting on a park bench'
UpperCamelCase__ = np.random.RandomState(0 )
UpperCamelCase__ = pipe(
prompt=snake_case_ , image=snake_case_ , mask_image=snake_case_ , guidance_scale=7.5 , num_inference_steps=20 , generator=snake_case_ , output_type='np' , )
UpperCamelCase__ = output.images
UpperCamelCase__ = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
UpperCamelCase__ = np.array([0.0_086, 0.0_077, 0.0_083, 0.0_093, 0.0_107, 0.0_139, 0.0_094, 0.0_097, 0.0_125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 20 |
"""simple docstring"""
def solution(limit: int = 50_00_00_00) -> int:
    """simple docstring"""
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))
    for prime_1 in primes:
        square = prime_1 * prime_1
        for prime_2 in primes:
            cube = prime_2 * prime_2 * prime_2
            if square + cube >= limit - 16:
                break
            for prime_3 in primes:
                tetr = prime_3 * prime_3 * prime_3 * prime_3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)
    return len(ret)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 20 | 1 |
"""simple docstring"""
class __lowerCamelCase :
def __init__( self , snake_case_ , snake_case_ ) -> Union[str, Any]:
UpperCamelCase__ = name
UpperCamelCase__ = val
def __str__( self ) -> Any:
return F'{self.__class__.__name__}({self.name}, {self.val})'
def __lt__( self , snake_case_ ) -> List[Any]:
return self.val < other.val
class __lowerCamelCase :
def __init__( self , snake_case_ ) -> Optional[Any]:
UpperCamelCase__ = {}
UpperCamelCase__ = {}
UpperCamelCase__ = self.build_heap(snake_case_ )
def __getitem__( self , snake_case_ ) -> Optional[Any]:
return self.get_value(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Union[str, Any]:
return (idx - 1) // 2
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> str:
return idx * 2 + 1
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Union[str, Any]:
return idx * 2 + 2
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Optional[Any]:
return self.heap_dict[key]
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> str:
UpperCamelCase__ = len(snake_case_ ) - 1
UpperCamelCase__ = self.get_parent_idx(snake_case_ )
for idx, i in enumerate(snake_case_ ):
UpperCamelCase__ = idx
UpperCamelCase__ = i.val
for i in range(snake_case_ , -1 , -1 ):
self.sift_down(snake_case_ , snake_case_ )
return array
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> Any:
while True:
UpperCamelCase__ = self.get_left_child_idx(snake_case_ ) # noqa: E741
UpperCamelCase__ = self.get_right_child_idx(snake_case_ )
UpperCamelCase__ = idx
if l < len(snake_case_ ) and array[l] < array[idx]:
UpperCamelCase__ = l
if r < len(snake_case_ ) and array[r] < array[smallest]:
UpperCamelCase__ = r
if smallest != idx:
UpperCamelCase__ , UpperCamelCase__ = array[smallest], array[idx]
                UpperCamelCase__ , UpperCamelCase__ = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
UpperCamelCase__ = smallest
else:
break
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> str:
UpperCamelCase__ = self.get_parent_idx(snake_case_ )
while p >= 0 and self.heap[p] > self.heap[idx]:
UpperCamelCase__ , UpperCamelCase__ = self.heap[idx], self.heap[p]
UpperCamelCase__ , UpperCamelCase__ = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
UpperCamelCase__ = p
UpperCamelCase__ = self.get_parent_idx(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
return self.heap[0]
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ , UpperCamelCase__ = self.heap[-1], self.heap[0]
UpperCamelCase__ , UpperCamelCase__ = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
UpperCamelCase__ = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 , self.heap )
return x
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Optional[int]:
self.heap.append(snake_case_ )
UpperCamelCase__ = len(self.heap ) - 1
UpperCamelCase__ = node.val
self.sift_up(len(self.heap ) - 1 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
return len(self.heap ) == 0
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> Tuple:
assert (
self.heap[self.idx_of_element[node]].val > new_value
), "newValue must be less that current value"
UpperCamelCase__ = new_value
UpperCamelCase__ = new_value
self.sift_up(self.idx_of_element[node] )
A__ : Any= Node("""R""", -1)
A__ : Any= Node("""B""", 6)
A__ : Optional[int]= Node("""A""", 3)
A__ : List[str]= Node("""X""", 1)
A__ : int= Node("""E""", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
A__ : str= MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("""Min Heap - before decrease key""")
for i in my_min_heap.heap:
print(i)
print("""Min Heap - After decrease key of node [B -> -17]""")
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
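# Minimal invariant check (a sketch; it assumes the obfuscated assignments in
# the class above bind the attributes they read, i.e. heap and idx_of_element):
# after build_heap and decrease_key, every parent is <= both of its children.
for parent in range(len(my_min_heap.heap)):
    for child in (2 * parent + 1, 2 * parent + 2):
        if child < len(my_min_heap.heap):
            assert my_min_heap.heap[parent].val <= my_min_heap.heap[child].val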
| 20 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
A__ : List[Any]= ["""bert-base-uncased""", """bert-base-cased"""]
A__ : Optional[int]= """hf-internal-testing/tiny-bert-tf-only"""
if is_tf_available():
class __lowerCamelCase ( tf.keras.Model ):
def __init__( self , snake_case_ ) -> Optional[int]:
super().__init__()
UpperCamelCase__ = tokenizer
UpperCamelCase__ = AutoConfig.from_pretrained(snake_case_ )
UpperCamelCase__ = TFAutoModel.from_config(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> int:
UpperCamelCase__ = self.tokenizer(snake_case_ )
UpperCamelCase__ = self.bert(**snake_case_ )
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class __lowerCamelCase ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
super().setUp()
UpperCamelCase__ = [
BertTokenizer.from_pretrained(snake_case_ ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
UpperCamelCase__ = [TFBertTokenizer.from_pretrained(snake_case_ ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(snake_case_ , use_fast_bert_tokenizer=snake_case_ )
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
UpperCamelCase__ = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
UpperCamelCase__ = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in (self.test_sentences, self.paired_sentences):
UpperCamelCase__ = tokenizer(snake_case_ , return_tensors='tf' , padding='longest' )
UpperCamelCase__ = tf_tokenizer(snake_case_ )
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
for tf_tokenizer in self.tf_tokenizers:
UpperCamelCase__ = tf_tokenizer(self.paired_sentences )
UpperCamelCase__ = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
for tf_tokenizer in self.tf_tokenizers:
UpperCamelCase__ = tf.function(snake_case_ )
for test_inputs in (self.test_sentences, self.paired_sentences):
UpperCamelCase__ = tf.constant(snake_case_ )
UpperCamelCase__ = compiled_tokenizer(snake_case_ )
UpperCamelCase__ = tf_tokenizer(snake_case_ )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
for tf_tokenizer in self.tf_tokenizers:
UpperCamelCase__ = ModelToSave(tokenizer=snake_case_ )
UpperCamelCase__ = tf.convert_to_tensor(self.test_sentences )
UpperCamelCase__ = model(snake_case_ ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
UpperCamelCase__ = Path(snake_case_ ) / 'saved.model'
model.save(snake_case_ )
UpperCamelCase__ = tf.keras.models.load_model(snake_case_ )
UpperCamelCase__ = loaded_model(snake_case_ )
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1E-5 )
| 20 | 1 |
"""simple docstring"""
import math
import unittest
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> bool:
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not prime
return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(SCREAMING_SNAKE_CASE ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
class __lowerCamelCase ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
with self.assertRaises(snake_case_ ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , 'Zero doesn\'t have any positive factors, primes must have exactly two.' , )
self.assertFalse(
is_prime(1 ) , 'One only has 1 positive factor, primes must have exactly two.' , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
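# Usage sketch for the 6k +/- 1 primality test above, via `is_prime`, the name the
# unit tests bind it to. 561 = 3 * 11 * 17 is rejected by the early multiple-of-3
# check, while 7919 (the 1000th prime) survives the trial-division loop:
#
#     >>> [n for n in (97, 561, 7919) if is_prime(n)]
#     [97, 7919]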
| 20 |
"""simple docstring"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> list[int]:
"""simple docstring"""
UpperCamelCase__ = len(SCREAMING_SNAKE_CASE )
for i in range(SCREAMING_SNAKE_CASE ):
for j in range(i + 1 , SCREAMING_SNAKE_CASE ):
if numbers[j] < numbers[i]:
UpperCamelCase__ , UpperCamelCase__ = numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
A__ : Union[str, Any]= input("""Enter numbers separated by a comma:\n""").strip()
A__ : List[Any]= [int(item) for item in user_input.split(""",""")]
print(exchange_sort(unsorted))
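# Worked example for the exchange sort above (doctest-style; `exchange_sort` is the
# name the __main__ block uses). Pass i leaves numbers[i] holding the smallest
# remaining element, so the list ends up sorted ascending in O(n^2) comparisons:
#
#     >>> exchange_sort([5, 4, 3, 2, 1])
#     [1, 2, 3, 4, 5]
#     >>> exchange_sort([-1, 9, 0, 9, 2])
#     [-1, 0, 2, 9, 9]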
| 20 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> float:
"""simple docstring"""
UpperCamelCase__ = u
for i in range(1 , SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = temp * (u - i)
return temp
def lowerCAmelCase_( ) -> None:
"""simple docstring"""
UpperCamelCase__ = int(input('enter the numbers of values: ' ) )
UpperCamelCase__ = []
for _ in range(SCREAMING_SNAKE_CASE ):
y.append([] )
for i in range(SCREAMING_SNAKE_CASE ):
for j in range(SCREAMING_SNAKE_CASE ):
y[i].append(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = 0
print('enter the values of parameters in a list: ' )
UpperCamelCase__ = list(map(SCREAMING_SNAKE_CASE , input().split() ) )
print('enter the values of corresponding parameters: ' )
for i in range(SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = float(input() )
UpperCamelCase__ = int(input('enter the value to interpolate: ' ) )
UpperCamelCase__ = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , SCREAMING_SNAKE_CASE ):
for j in range(n - i ):
UpperCamelCase__ = y[j + 1][i - 1] - y[j][i - 1]
UpperCamelCase__ = y[0][0]
for i in range(1 , SCREAMING_SNAKE_CASE ):
summ += (ucal(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) * y[0][i]) / math.factorial(SCREAMING_SNAKE_CASE )
print(F'the value at {value} is {summ}' )
if __name__ == "__main__":
main()
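# Worked example for the first helper above (named `ucal` at its call site in main):
# ucal(u, p) returns u * (u - 1) * ... * (u - p + 1), the numerator of the p-th
# Newton forward-difference term. For u = 0.4: ucal(0.4, 2) = 0.4 * (0.4 - 1) = -0.24,
# so the second-order term is weighted by -0.24 / 2! = -0.12.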
| 20 |
"""simple docstring"""
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
A__ : str= {
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
A__ : str= {
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
UpperCamelCase__ = (images / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase__ = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
UpperCamelCase__ = numpy_to_pil(SCREAMING_SNAKE_CASE )
return images
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
if images.ndim == 3:
UpperCamelCase__ = images[None, ...]
UpperCamelCase__ = (images * 2_55).round().astype('uint8' )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
UpperCamelCase__ = [Image.fromarray(image.squeeze() , mode='L' ) for image in images]
else:
UpperCamelCase__ = [Image.fromarray(SCREAMING_SNAKE_CASE ) for image in images]
return pil_images
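# Round-trip sketch for the two helpers above. `numpy_to_pil` matches the internal
# call; `pt_to_pil` is an assumed name for the first helper, which expects a torch
# batch of images scaled to [-1, 1] (as diffusion models emit).
if __name__ == "__main__":
    import torch

    fake_batch = torch.rand(2, 3, 64, 64) * 2 - 1  # fake image batch, values in [-1, 1]
    pil_images = pt_to_pil(fake_batch)
    print(len(pil_images), pil_images[0].size)  # 2 (64, 64)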
| 20 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
A__ : Tuple= logging.get_logger(__name__) # pylint: disable=invalid-name
A__ : List[str]= """
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"A red cartoon frog, 4k\"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16
... )
>>> pipe.to(\"cuda\")
>>> init_image = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/frog.png\"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save(\"red_frog.png\")
```
"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=8 ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
UpperCamelCase__ = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
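# Worked example for the rounding helper above (called `downscale_height_and_width`
# later in the pipeline): with the default scale_factor=8, pixel sizes are rounded up
# to whole 64-pixel latent cells, so (768, 512) maps to (96, 64) while (770, 512)
# maps to (104, 64), because 770 // 64 + 1 = 13 and 13 * 8 = 104.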
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=5_12 , SCREAMING_SNAKE_CASE=5_12 ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
UpperCamelCase__ = np.array(pil_image.convert('RGB' ) )
    UpperCamelCase__ = arr.astype(np.float32 ) / 127.5 - 1
UpperCamelCase__ = np.transpose(SCREAMING_SNAKE_CASE , [2, 0, 1] )
UpperCamelCase__ = torch.from_numpy(SCREAMING_SNAKE_CASE ).unsqueeze(0 )
return image
class __lowerCamelCase ( _a ):
def __init__( self , snake_case_ , snake_case_ , snake_case_ , ) -> Union[str, Any]:
super().__init__()
self.register_modules(
unet=snake_case_ , scheduler=snake_case_ , movq=snake_case_ , )
UpperCamelCase__ = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> Union[str, Any]:
# get the original timestep using init_timestep
UpperCamelCase__ = min(int(num_inference_steps * strength ) , snake_case_ )
UpperCamelCase__ = max(num_inference_steps - init_timestep , 0 )
UpperCamelCase__ = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_=None ) -> str:
if not isinstance(snake_case_ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(snake_case_ )}' )
UpperCamelCase__ = image.to(device=snake_case_ , dtype=snake_case_ )
UpperCamelCase__ = batch_size * num_images_per_prompt
if image.shape[1] == 4:
UpperCamelCase__ = image
else:
if isinstance(snake_case_ , snake_case_ ) and len(snake_case_ ) != batch_size:
raise ValueError(
F'You have passed a list of generators of length {len(snake_case_ )}, but requested an effective batch'
F' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
elif isinstance(snake_case_ , snake_case_ ):
UpperCamelCase__ = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(snake_case_ )
]
UpperCamelCase__ = torch.cat(snake_case_ , dim=0 )
else:
UpperCamelCase__ = self.movq.encode(snake_case_ ).latent_dist.sample(snake_case_ )
UpperCamelCase__ = self.movq.config.scaling_factor * init_latents
UpperCamelCase__ = torch.cat([init_latents] , dim=0 )
UpperCamelCase__ = init_latents.shape
UpperCamelCase__ = randn_tensor(snake_case_ , generator=snake_case_ , device=snake_case_ , dtype=snake_case_ )
# get latents
UpperCamelCase__ = self.scheduler.add_noise(snake_case_ , snake_case_ , snake_case_ )
UpperCamelCase__ = init_latents
return latents
def SCREAMING_SNAKE_CASE__ ( self , snake_case_=0 ) -> Tuple:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
UpperCamelCase__ = torch.device(F'cuda:{gpu_id}' )
UpperCamelCase__ = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(snake_case_ , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_=0 ) -> List[str]:
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
UpperCamelCase__ = torch.device(F'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=snake_case_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCamelCase__ = None
for cpu_offloaded_model in [self.unet, self.movq]:
UpperCamelCase__ , UpperCamelCase__ = cpu_offload_with_hook(snake_case_ , snake_case_ , prev_module_hook=snake_case_ )
# We'll offload the last model manually.
UpperCamelCase__ = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(snake_case_ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(snake_case_ )
def __call__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ = 512 , snake_case_ = 512 , snake_case_ = 100 , snake_case_ = 4.0 , snake_case_ = 0.3 , snake_case_ = 1 , snake_case_ = None , snake_case_ = "pil" , snake_case_ = True , ) -> List[str]:
UpperCamelCase__ = self._execution_device
UpperCamelCase__ = guidance_scale > 1.0
if isinstance(snake_case_ , snake_case_ ):
UpperCamelCase__ = torch.cat(snake_case_ , dim=0 )
UpperCamelCase__ = image_embeds.shape[0]
if isinstance(snake_case_ , snake_case_ ):
UpperCamelCase__ = torch.cat(snake_case_ , dim=0 )
if do_classifier_free_guidance:
UpperCamelCase__ = image_embeds.repeat_interleave(snake_case_ , dim=0 )
UpperCamelCase__ = negative_image_embeds.repeat_interleave(snake_case_ , dim=0 )
UpperCamelCase__ = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=snake_case_ )
if not isinstance(snake_case_ , snake_case_ ):
UpperCamelCase__ = [image]
if not all(isinstance(snake_case_ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F'Input is in incorrect format: {[type(snake_case_ ) for i in image]}. Currently, we only support PIL image and pytorch tensor' )
UpperCamelCase__ = torch.cat([prepare_image(snake_case_ , snake_case_ , snake_case_ ) for i in image] , dim=0 )
UpperCamelCase__ = image.to(dtype=image_embeds.dtype , device=snake_case_ )
UpperCamelCase__ = self.movq.encode(snake_case_ )['latents']
UpperCamelCase__ = latents.repeat_interleave(snake_case_ , dim=0 )
self.scheduler.set_timesteps(snake_case_ , device=snake_case_ )
UpperCamelCase__ , UpperCamelCase__ = self.get_timesteps(snake_case_ , snake_case_ , snake_case_ )
UpperCamelCase__ = timesteps[:1].repeat(batch_size * num_images_per_prompt )
UpperCamelCase__ , UpperCamelCase__ = downscale_height_and_width(snake_case_ , snake_case_ , self.movq_scale_factor )
UpperCamelCase__ = self.prepare_latents(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , image_embeds.dtype , snake_case_ , snake_case_ )
for i, t in enumerate(self.progress_bar(snake_case_ ) ):
# expand the latents if we are doing classifier free guidance
UpperCamelCase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCamelCase__ = {'image_embeds': image_embeds}
UpperCamelCase__ = self.unet(
sample=snake_case_ , timestep=snake_case_ , encoder_hidden_states=snake_case_ , added_cond_kwargs=snake_case_ , return_dict=snake_case_ , )[0]
if do_classifier_free_guidance:
UpperCamelCase__ , UpperCamelCase__ = noise_pred.split(latents.shape[1] , dim=1 )
UpperCamelCase__ , UpperCamelCase__ = noise_pred.chunk(2 )
UpperCamelCase__ , UpperCamelCase__ = variance_pred.chunk(2 )
UpperCamelCase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCamelCase__ = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCamelCase__ , UpperCamelCase__ = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase__ = self.scheduler.step(
snake_case_ , snake_case_ , snake_case_ , generator=snake_case_ , )[0]
# post-processing
UpperCamelCase__ = self.movq.decode(snake_case_ , force_not_quantize=snake_case_ )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
UpperCamelCase__ = image * 0.5 + 0.5
UpperCamelCase__ = image.clamp(0 , 1 )
UpperCamelCase__ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCamelCase__ = self.numpy_to_pil(snake_case_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=snake_case_ )
| 20 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
A__ : Dict= logging.get_logger(__name__)
A__ : str= {"""vocab_file""": """spiece.model"""}
A__ : Union[str, Any]= {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
}
}
# TODO(PVP) - this should be removed in Transformers v5
A__ : Union[str, Any]= {
"""t5-small""": 5_12,
"""t5-base""": 5_12,
"""t5-large""": 5_12,
"""t5-3b""": 5_12,
"""t5-11b""": 5_12,
}
A__ : Optional[Any]= """▁"""
class __lowerCamelCase ( _a ):
a : Dict =VOCAB_FILES_NAMES
a : str =PRETRAINED_VOCAB_FILES_MAP
a : Union[str, Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : List[str] =["""input_ids""", """attention_mask"""]
def __init__( self , snake_case_ , snake_case_="</s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_=100 , snake_case_=None , snake_case_ = None , snake_case_=True , **snake_case_ , ) -> None:
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
UpperCamelCase__ = [F'<extra_id_{i}>' for i in range(snake_case_ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
UpperCamelCase__ = len(set(filter(lambda snake_case_ : bool('extra_id' in str(snake_case_ ) ) , snake_case_ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
' tokens' )
if legacy:
logger.warning_once(
F'You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'
' read the related pull request available at https://github.com/huggingface/transformers/pull/24565' )
UpperCamelCase__ = legacy
UpperCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , extra_ids=snake_case_ , additional_special_tokens=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , legacy=snake_case_ , **snake_case_ , )
UpperCamelCase__ = vocab_file
UpperCamelCase__ = extra_ids
UpperCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case_ )
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case_ , snake_case_ , snake_case_ ) -> Optional[Any]:
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
UpperCamelCase__ = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'This tokenizer was incorrectly instantiated with a model max length of'
F' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
F' {pretrained_model_name_or_path} automatically truncating your input to'
F' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
F' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
' instantiate this tokenizer with `model_max_length` set to your preferred value.' , snake_case_ , )
return max_model_length
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
return self.sp_model.get_piece_size() + self._extra_ids
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = {self.convert_ids_to_tokens(snake_case_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None , snake_case_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(snake_case_ )) + [1]
return ([0] * len(snake_case_ )) + [1] + ([0] * len(snake_case_ )) + [1]
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
return list(
set(filter(lambda snake_case_ : bool(re.search(r'<extra_id_\d+>' , snake_case_ ) ) is not None , self.additional_special_tokens ) ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
return [self._convert_token_to_id(snake_case_ ) for token in self.get_sentinel_tokens()]
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> List[int]:
if len(snake_case_ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
' eos tokens being added.' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None ) -> List[int]:
UpperCamelCase__ = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None ) -> List[int]:
UpperCamelCase__ = self._add_eos_if_not_present(snake_case_ )
if token_ids_a is None:
return token_ids_a
else:
UpperCamelCase__ = self._add_eos_if_not_present(snake_case_ )
return token_ids_a + token_ids_a
def __getstate__( self ) -> str:
UpperCamelCase__ = self.__dict__.copy()
UpperCamelCase__ = None
return state
def __setstate__( self , snake_case_ ) -> Any:
UpperCamelCase__ = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
UpperCamelCase__ = {}
UpperCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , **snake_case_ ) -> List[str]:
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
UpperCamelCase__ = SPIECE_UNDERLINE + text.replace(snake_case_ , ' ' )
return super().tokenize(snake_case_ , **snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , **snake_case_ ) -> List[Any]:
if not self.legacy:
UpperCamelCase__ = text.startswith(snake_case_ )
if is_first:
UpperCamelCase__ = text[1:]
UpperCamelCase__ = self.sp_model.encode(snake_case_ , out_type=snake_case_ )
if not self.legacy and not is_first and not text.startswith(' ' ) and tokens[0].startswith(snake_case_ ):
UpperCamelCase__ = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> List[str]:
if token.startswith('<extra_id_' ):
UpperCamelCase__ = re.match(r'<extra_id_(\d+)>' , snake_case_ )
UpperCamelCase__ = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Optional[int]:
if index < self.sp_model.get_piece_size():
UpperCamelCase__ = self.sp_model.IdToPiece(snake_case_ )
else:
UpperCamelCase__ = F'<extra_id_{self.vocab_size - 1 - index}>'
return token
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> List[str]:
UpperCamelCase__ = []
UpperCamelCase__ = ''
UpperCamelCase__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(snake_case_ ) + token
UpperCamelCase__ = True
UpperCamelCase__ = []
else:
current_sub_tokens.append(snake_case_ )
UpperCamelCase__ = False
out_string += self.sp_model.decode(snake_case_ )
return out_string.strip()
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]:
if not os.path.isdir(snake_case_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
UpperCamelCase__ = os.path.join(
snake_case_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case_ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case_ , 'wb' ) as fi:
UpperCamelCase__ = self.sp_model.serialized_model_proto()
fi.write(snake_case_ )
return (out_vocab_file,)
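# Sketch of the sentinel-token id layout implemented above: with the default
# extra_ids=100 the sentinels sit at the top of the vocabulary, so for the real
# t5-small vocabulary (32000 SentencePiece entries + 100 extras = 32100),
# "<extra_id_0>" maps to 32100 - 0 - 1 = 32099, and id 32099 decodes back to
# "<extra_id_0>" via the `<extra_id_{vocab_size - 1 - index}>` branch.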
| 20 | 1 |
"""simple docstring"""
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __lowerCamelCase :
@staticmethod
def SCREAMING_SNAKE_CASE__ ( *snake_case_ , **snake_case_ ) -> Optional[Any]:
pass
@is_pipeline_test
@require_vision
class __lowerCamelCase ( unittest.TestCase ):
@require_torch
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
UpperCamelCase__ = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , )
UpperCamelCase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
UpperCamelCase__ = image_classifier(snake_case_ , candidate_labels=['a', 'b', 'c'] )
        # The float scores are so close that floating-point error can flip their order,
        # so the ranking is not guaranteed to be stable across Python and torch versions.
self.assertIn(
nested_simplify(snake_case_ ) , [
[{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'b'}, {'score': 0.333, 'label': 'c'}],
[{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'c'}, {'score': 0.333, 'label': 'b'}],
] , )
UpperCamelCase__ = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2 )
self.assertEqual(
nested_simplify(snake_case_ ) , [
[
{'score': 0.333, 'label': ANY(snake_case_ )},
{'score': 0.333, 'label': ANY(snake_case_ )},
{'score': 0.333, 'label': ANY(snake_case_ )},
],
[
{'score': 0.333, 'label': ANY(snake_case_ )},
{'score': 0.333, 'label': ANY(snake_case_ )},
{'score': 0.333, 'label': ANY(snake_case_ )},
],
[
{'score': 0.333, 'label': ANY(snake_case_ )},
{'score': 0.333, 'label': ANY(snake_case_ )},
{'score': 0.333, 'label': ANY(snake_case_ )},
],
[
{'score': 0.333, 'label': ANY(snake_case_ )},
{'score': 0.333, 'label': ANY(snake_case_ )},
{'score': 0.333, 'label': ANY(snake_case_ )},
],
[
{'score': 0.333, 'label': ANY(snake_case_ )},
{'score': 0.333, 'label': ANY(snake_case_ )},
{'score': 0.333, 'label': ANY(snake_case_ )},
],
] , )
@require_tf
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , framework='tf' )
UpperCamelCase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
UpperCamelCase__ = image_classifier(snake_case_ , candidate_labels=['a', 'b', 'c'] )
self.assertEqual(
nested_simplify(snake_case_ ) , [{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'b'}, {'score': 0.333, 'label': 'c'}] , )
UpperCamelCase__ = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2 )
self.assertEqual(
nested_simplify(snake_case_ ) , [
[
{'score': 0.333, 'label': ANY(snake_case_ )},
{'score': 0.333, 'label': ANY(snake_case_ )},
{'score': 0.333, 'label': ANY(snake_case_ )},
],
[
{'score': 0.333, 'label': ANY(snake_case_ )},
{'score': 0.333, 'label': ANY(snake_case_ )},
{'score': 0.333, 'label': ANY(snake_case_ )},
],
[
{'score': 0.333, 'label': ANY(snake_case_ )},
{'score': 0.333, 'label': ANY(snake_case_ )},
{'score': 0.333, 'label': ANY(snake_case_ )},
],
[
{'score': 0.333, 'label': ANY(snake_case_ )},
{'score': 0.333, 'label': ANY(snake_case_ )},
{'score': 0.333, 'label': ANY(snake_case_ )},
],
[
{'score': 0.333, 'label': ANY(snake_case_ )},
{'score': 0.333, 'label': ANY(snake_case_ )},
{'score': 0.333, 'label': ANY(snake_case_ )},
],
] , )
@slow
@require_torch
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , )
# This is an image of 2 cats with remotes and no planes
UpperCamelCase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
UpperCamelCase__ = image_classifier(snake_case_ , candidate_labels=['cat', 'plane', 'remote'] )
self.assertEqual(
nested_simplify(snake_case_ ) , [
{'score': 0.511, 'label': 'remote'},
{'score': 0.485, 'label': 'cat'},
{'score': 0.004, 'label': 'plane'},
] , )
UpperCamelCase__ = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2 )
self.assertEqual(
nested_simplify(snake_case_ ) , [
[
{'score': 0.511, 'label': 'remote'},
{'score': 0.485, 'label': 'cat'},
{'score': 0.004, 'label': 'plane'},
],
]
* 5 , )
@slow
@require_tf
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
UpperCamelCase__ = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , framework='tf' )
# This is an image of 2 cats with remotes and no planes
UpperCamelCase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
UpperCamelCase__ = image_classifier(snake_case_ , candidate_labels=['cat', 'plane', 'remote'] )
self.assertEqual(
nested_simplify(snake_case_ ) , [
{'score': 0.511, 'label': 'remote'},
{'score': 0.485, 'label': 'cat'},
{'score': 0.004, 'label': 'plane'},
] , )
UpperCamelCase__ = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2 )
self.assertEqual(
nested_simplify(snake_case_ ) , [
[
{'score': 0.511, 'label': 'remote'},
{'score': 0.485, 'label': 'cat'},
{'score': 0.004, 'label': 'plane'},
],
]
* 5 , )
| 20 |
"""simple docstring"""
from __future__ import annotations
from typing import Generic, TypeVar
A__ : Any= TypeVar("""T""")
class __lowerCamelCase ( Generic[T] ):
def __init__( self , snake_case_ ) -> None:
UpperCamelCase__ = data
UpperCamelCase__ = self
UpperCamelCase__ = 0
class __lowerCamelCase ( Generic[T] ):
def __init__( self ) -> None:
# map from node name to the node object
UpperCamelCase__ = {}
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> None:
# create a new set with x as its member
UpperCamelCase__ = DisjointSetTreeNode(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> DisjointSetTreeNode[T]:
# find the set x belongs to (with path-compression)
UpperCamelCase__ = self.map[data]
if elem_ref != elem_ref.parent:
UpperCamelCase__ = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> None:
# helper function for union operation
if nodea.rank > nodea.rank:
UpperCamelCase__ = nodea
else:
UpperCamelCase__ = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> None:
# merge 2 disjoint sets
self.link(self.find_set(snake_case_ ) , self.find_set(snake_case_ ) )
class __lowerCamelCase ( Generic[T] ):
def __init__( self ) -> None:
# connections: map from the node to the neighbouring nodes (with weights)
UpperCamelCase__ = {}
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> None:
# add a node ONLY if its not present in the graph
if node not in self.connections:
UpperCamelCase__ = {}
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> None:
# add an edge with the given weight
self.add_node(snake_case_ )
self.add_node(snake_case_ )
UpperCamelCase__ = weight
UpperCamelCase__ = weight
def SCREAMING_SNAKE_CASE__ ( self ) -> GraphUndirectedWeighted[T]:
UpperCamelCase__ = []
UpperCamelCase__ = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
        edges.sort(key=lambda snake_case_ : snake_case_[2] )
# creating the disjoint set
UpperCamelCase__ = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(snake_case_ )
# MST generation
UpperCamelCase__ = 0
UpperCamelCase__ = 0
UpperCamelCase__ = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = edges[index]
index += 1
UpperCamelCase__ = disjoint_set.find_set(snake_case_ )
UpperCamelCase__ = disjoint_set.find_set(snake_case_ )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(snake_case_ , snake_case_ , snake_case_ )
disjoint_set.union(snake_case_ , snake_case_ )
return graph
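# Minimal usage sketch for the Kruskal implementation above. The class and method
# names here (GraphUndirectedWeighted, kruskal) are taken from the type annotations;
# only add_edge/add_node appear verbatim, so treat the MST method name as an assumption.
if __name__ == "__main__":
    graph = GraphUndirectedWeighted[int]()
    graph.add_edge(1, 2, 1)
    graph.add_edge(2, 3, 2)
    graph.add_edge(1, 3, 10)  # heavy edge; union-find keeps it out of the MST
    mst = graph.kruskal()
    print(mst.connections)  # contains edges (1, 2) and (2, 3) but not (1, 3)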
| 20 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class __lowerCamelCase ( unittest.TestCase ):
a : Optional[Any] =StableDiffusionLDMaDPipeline
a : int =TEXT_TO_IMAGE_PARAMS
a : Tuple =TEXT_TO_IMAGE_BATCH_PARAMS
a : List[str] =TEXT_TO_IMAGE_IMAGE_PARAMS
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
torch.manual_seed(0 )
UpperCamelCase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
UpperCamelCase__ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=snake_case_ , set_alpha_to_one=snake_case_ , )
torch.manual_seed(0 )
UpperCamelCase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCamelCase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
UpperCamelCase__ = CLIPTextModel(snake_case_ )
UpperCamelCase__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
UpperCamelCase__ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_=0 ) -> Dict:
if str(snake_case_ ).startswith('mps' ):
UpperCamelCase__ = torch.manual_seed(snake_case_ )
else:
UpperCamelCase__ = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
UpperCamelCase__ = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ = self.get_dummy_components()
UpperCamelCase__ = StableDiffusionLDMaDPipeline(**snake_case_ )
UpperCamelCase__ = ldmad_pipe.to(snake_case_ )
ldmad_pipe.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase__ = self.get_dummy_inputs(snake_case_ )
UpperCamelCase__ = ldmad_pipe(**snake_case_ )
UpperCamelCase__ , UpperCamelCase__ = output.rgb, output.depth
UpperCamelCase__ = rgb[0, -3:, -3:, -1]
UpperCamelCase__ = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
UpperCamelCase__ = np.array(
[0.37_338_176, 0.70_247, 0.74_203_193, 0.51_643_604, 0.58_256_793, 0.60_932_136, 0.4_181_095, 0.48_355_877, 0.46_535_262] )
UpperCamelCase__ = np.array([103.46_727, 85.812_004, 87.849_236] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1E-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1E-2
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
UpperCamelCase__ = self.get_dummy_components()
UpperCamelCase__ = StableDiffusionLDMaDPipeline(**snake_case_ )
UpperCamelCase__ = ldmad_pipe.to(snake_case_ )
ldmad_pipe.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase__ = self.get_dummy_inputs(snake_case_ )
UpperCamelCase__ = 3 * [inputs['prompt']]
# forward
UpperCamelCase__ = ldmad_pipe(**snake_case_ )
UpperCamelCase__ , UpperCamelCase__ = output.rgb, output.depth
UpperCamelCase__ = rgb_slice_a[0, -3:, -3:, -1]
UpperCamelCase__ = depth_slice_a[0, -3:, -1]
UpperCamelCase__ = self.get_dummy_inputs(snake_case_ )
UpperCamelCase__ = 3 * [inputs.pop('prompt' )]
UpperCamelCase__ = ldmad_pipe.tokenizer(
snake_case_ , padding='max_length' , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=snake_case_ , return_tensors='pt' , )
UpperCamelCase__ = text_inputs['input_ids'].to(snake_case_ )
UpperCamelCase__ = ldmad_pipe.text_encoder(snake_case_ )[0]
UpperCamelCase__ = prompt_embeds
# forward
UpperCamelCase__ = ldmad_pipe(**snake_case_ )
UpperCamelCase__ , UpperCamelCase__ = output.rgb, output.depth
UpperCamelCase__ = rgb_slice_a[0, -3:, -3:, -1]
UpperCamelCase__ = depth_slice_a[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1E-4
assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1E-4
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ = self.get_dummy_components()
UpperCamelCase__ = PNDMScheduler(skip_prk_steps=snake_case_ )
UpperCamelCase__ = StableDiffusionLDMaDPipeline(**snake_case_ )
UpperCamelCase__ = ldmad_pipe.to(snake_case_ )
ldmad_pipe.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase__ = self.get_dummy_inputs(snake_case_ )
UpperCamelCase__ = 'french fries'
UpperCamelCase__ = ldmad_pipe(**snake_case_ , negative_prompt=snake_case_ )
UpperCamelCase__ , UpperCamelCase__ = output.rgb, output.depth
UpperCamelCase__ = rgb[0, -3:, -3:, -1]
UpperCamelCase__ = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
UpperCamelCase__ = np.array(
[0.37_044, 0.71_811_503, 0.7_223_251, 0.48_603_675, 0.5_638_391, 0.6_364_948, 0.42_833_704, 0.4_901_315, 0.47_926_217] )
UpperCamelCase__ = np.array([107.84_738, 84.62_802, 89.962_135] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1E-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1E-2
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_="cpu" , snake_case_=torch.floataa , snake_case_=0 ) -> int:
UpperCamelCase__ = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
UpperCamelCase__ = np.random.RandomState(snake_case_ ).standard_normal((1, 4, 64, 64) )
UpperCamelCase__ = torch.from_numpy(snake_case_ ).to(device=snake_case_ , dtype=snake_case_ )
UpperCamelCase__ = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
UpperCamelCase__ = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d' )
UpperCamelCase__ = ldmad_pipe.to(snake_case_ )
ldmad_pipe.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase__ = self.get_inputs(snake_case_ )
UpperCamelCase__ = ldmad_pipe(**snake_case_ )
UpperCamelCase__ , UpperCamelCase__ = output.rgb, output.depth
UpperCamelCase__ = rgb[0, -3:, -3:, -1].flatten()
UpperCamelCase__ = rgb[0, -3:, -1].flatten()
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512)
UpperCamelCase__ = np.array(
[0.53_805_465, 0.56_707_305, 0.5_486_515, 0.57_012_236, 0.5_814_511, 0.56_253_487, 0.54_843_014, 0.55_092_263, 0.6_459_706] )
UpperCamelCase__ = np.array(
[0.9_263_781, 0.6_678_672, 0.5_486_515, 0.92_202_145, 0.67_831_135, 0.56_253_487, 0.9_241_694, 0.7_551_478, 0.6_459_706] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3E-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3E-3
@nightly
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_="cpu" , snake_case_=torch.floataa , snake_case_=0 ) -> Optional[int]:
UpperCamelCase__ = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
UpperCamelCase__ = np.random.RandomState(snake_case_ ).standard_normal((1, 4, 64, 64) )
UpperCamelCase__ = torch.from_numpy(snake_case_ ).to(device=snake_case_ , dtype=snake_case_ )
UpperCamelCase__ = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 50,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d' ).to(snake_case_ )
ldmad_pipe.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase__ = self.get_inputs(snake_case_ )
UpperCamelCase__ = ldmad_pipe(**snake_case_ )
UpperCamelCase__ , UpperCamelCase__ = output.rgb, output.depth
UpperCamelCase__ = 0.495_586
UpperCamelCase__ = 0.33_795_515
UpperCamelCase__ = 112.48_518
UpperCamelCase__ = 98.489_746
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
UpperCamelCase__ = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d-4c' ).to(snake_case_ )
ldmad_pipe.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase__ = self.get_inputs(snake_case_ )
UpperCamelCase__ = ldmad_pipe(**snake_case_ )
UpperCamelCase__ , UpperCamelCase__ = output.rgb, output.depth
UpperCamelCase__ = 0.4_194_127
UpperCamelCase__ = 0.35_375_586
UpperCamelCase__ = 0.5_638_502
UpperCamelCase__ = 0.34_686_103
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
| 20 |
"""simple docstring"""
A__ : Tuple= """Alexander Joslin"""
import operator as op
from .stack import Stack
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
UpperCamelCase__ = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
UpperCamelCase__ = Stack()
UpperCamelCase__ = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(SCREAMING_SNAKE_CASE ) )
elif i in operators:
# RULE 2
operator_stack.push(SCREAMING_SNAKE_CASE )
elif i == ")":
# RULE 4
UpperCamelCase__ = operator_stack.peek()
operator_stack.pop()
UpperCamelCase__ = operand_stack.peek()
operand_stack.pop()
UpperCamelCase__ = operand_stack.peek()
operand_stack.pop()
UpperCamelCase__ = operators[opr](SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
operand_stack.push(SCREAMING_SNAKE_CASE )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
A__ : int= """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 20 | 1 |
"""simple docstring"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> float:
"""simple docstring"""
if not nums: # Makes sure that the list is not empty
raise ValueError('List is empty' )
UpperCamelCase__ = sum(SCREAMING_SNAKE_CASE ) / len(SCREAMING_SNAKE_CASE ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
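# Worked example (the function name is obfuscated above; assume it is exposed as
# `mean_absolute_deviation`): for [1, 2, 3, 4] the mean is 2.5 and the absolute
# deviations are 1.5, 0.5, 0.5, 1.5, so the result is 4.0 / 4 = 1.0.
#
#     >>> mean_absolute_deviation([1, 2, 3, 4])
#     1.0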
| 20 |
"""simple docstring"""
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
A__ : Any= """src/diffusers"""
# Matches is_xxx_available()
A__ : Tuple= re.compile(r"""is\_([a-z_]*)_available\(\)""")
# Matches from xxx import bla
A__ : Any= re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
A__ : Optional[Any]= """
{0} = None
"""
A__ : List[Any]= """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
"""
A__ : Dict= """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ = _re_backend.findall(SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) == 0:
return None
return "_and_".join(SCREAMING_SNAKE_CASE )
def lowerCAmelCase_( ) -> str:
"""simple docstring"""
with open(os.path.join(SCREAMING_SNAKE_CASE , '__init__.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
UpperCamelCase__ = f.readlines()
# Get to the point we do the actual imports for type checking
UpperCamelCase__ = 0
UpperCamelCase__ = {}
# Go through the end of the file
while line_index < len(SCREAMING_SNAKE_CASE ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
UpperCamelCase__ = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith('else:' ):
line_index += 1
line_index += 1
UpperCamelCase__ = []
# Until we unindent, add backend objects to the list
while line_index < len(SCREAMING_SNAKE_CASE ) and len(lines[line_index] ) > 1:
UpperCamelCase__ = lines[line_index]
UpperCamelCase__ = _re_single_line_import.search(SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(SCREAMING_SNAKE_CASE ) > 0:
UpperCamelCase__ = objects
else:
line_index += 1
return backend_specific_objects
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
if name.isupper():
return DUMMY_CONSTANT.format(SCREAMING_SNAKE_CASE )
elif name.islower():
return DUMMY_FUNCTION.format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
return DUMMY_CLASS.format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE=None ) -> int:
"""simple docstring"""
if backend_specific_objects is None:
UpperCamelCase__ = read_init()
# For special correspondence backend to module name as used in the function requires_modulename
UpperCamelCase__ = {}
for backend, objects in backend_specific_objects.items():
UpperCamelCase__ = '[' + ', '.join(F'"{b}"' for b in backend.split('_and_' ) ) + ']'
UpperCamelCase__ = '# This file is autogenerated by the command `make fix-copies`, do not edit.\n'
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for o in objects] )
UpperCamelCase__ = dummy_file
return dummy_files
def lowerCAmelCase_( SCREAMING_SNAKE_CASE=False ) -> int:
"""simple docstring"""
UpperCamelCase__ = create_dummy_files()
# For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
UpperCamelCase__ = {'torch': 'pt'}
# Locate actual dummy modules and read their content.
UpperCamelCase__ = os.path.join(SCREAMING_SNAKE_CASE , 'utils' )
UpperCamelCase__ = {
backend: os.path.join(SCREAMING_SNAKE_CASE , F'dummy_{short_names.get(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}_objects.py' )
for backend in dummy_files.keys()
}
UpperCamelCase__ = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(SCREAMING_SNAKE_CASE ):
with open(SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' , newline='\n' ) as f:
UpperCamelCase__ = f.read()
else:
UpperCamelCase__ = ''
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F'Updating diffusers.utils.dummy_{short_names.get(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}_objects.py as the main '
'__init__ has new objects.' )
with open(dummy_file_paths[backend] , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
'The main __init__ has objects that are not present in '
F'diffusers.utils.dummy_{short_names.get(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}_objects.py. Run `make fix-copies` '
'to fix this.' )
if __name__ == "__main__":
A__ : Any= argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
A__ : Optional[int]= parser.parse_args()
check_dummies(args.fix_and_overwrite)
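# For reference, the DUMMY_CLASS template above renders an entry like the following
# for a torch-only object (the object name here is chosen for illustration):
#
#   class UNet2DConditionModel(metaclass=DummyObject):
#       _backends = ["torch"]
#
#       def __init__(self, *args, **kwargs):
#           requires_backends(self, ["torch"])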
| 20 | 1 |
"""simple docstring"""
A__ : dict[str, float]= {
"km/h": 1.0,
"m/s": 3.6,
"mph": 1.6_0_9_3_4_4,
"knot": 1.8_5_2,
}
A__ : dict[str, float]= {
"km/h": 1.0,
"m/s": 0.2_7_7_7_7_7_7_7_8,
"mph": 0.6_2_1_3_7_1_1_9_2,
"knot": 0.5_3_9_9_5_6_8_0_3,
}
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> float:
"""simple docstring"""
if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
UpperCamelCase__ = (
F'Incorrect \'from_type\' or \'to_type\' value: {unit_from!r}, {unit_to!r}\n'
F'Valid values are: {", ".join(SCREAMING_SNAKE_CASE )}'
)
raise ValueError(SCREAMING_SNAKE_CASE )
return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
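# Worked example (assuming the converter above is exposed as `convert_speed`; the
# chart keys are real): km/h -> m/s multiplies by speed_chart["km/h"] = 1.0 and
# speed_chart_inverse["m/s"] = 0.277777778, then rounds to 3 places.
#
#     >>> convert_speed(100, "km/h", "m/s")
#     27.778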
| 20 |
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
A__ : Optional[Any]= """Run commands across TPU VMs for initial setup before running `accelerate launch`."""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE=None ) -> Dict:
"""simple docstring"""
if subparsers is not None:
UpperCamelCase__ = subparsers.add_parser('tpu-config' , description=_description )
else:
UpperCamelCase__ = argparse.ArgumentParser('Accelerate tpu-config command' , description=_description )
# Core arguments
UpperCamelCase__ = parser.add_argument_group(
'Config Arguments' , 'Arguments that can be configured through `accelerate config`.' )
config_args.add_argument(
'--config_file' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='Path to the config file to use for accelerate.' , )
config_args.add_argument(
'--tpu_name' , default=SCREAMING_SNAKE_CASE , help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.' , )
config_args.add_argument(
'--tpu_zone' , default=SCREAMING_SNAKE_CASE , help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.' , )
UpperCamelCase__ = parser.add_argument_group('TPU Arguments' , 'Arguments for options ran inside the TPU.' )
pod_args.add_argument(
'--use_alpha' , action='store_true' , help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.' , )
pod_args.add_argument(
'--command_file' , default=SCREAMING_SNAKE_CASE , help='The path to the file containing the commands to run on the pod on startup.' , )
pod_args.add_argument(
'--command' , action='append' , nargs='+' , help='A command to run on the pod. Can be passed multiple times.' , )
pod_args.add_argument(
'--install_accelerate' , action='store_true' , help='Whether to install accelerate on the pod. Defaults to False.' , )
pod_args.add_argument(
'--accelerate_version' , default='latest' , help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.' , )
pod_args.add_argument(
'--debug' , action='store_true' , help='If set, will print the command that would be run instead of running it.' )
if subparsers is not None:
parser.set_defaults(func=SCREAMING_SNAKE_CASE )
return parser
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
UpperCamelCase__ = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
UpperCamelCase__ = defaults.command_file
if not args.command and defaults.commands is not None:
UpperCamelCase__ = defaults.commands
if not args.tpu_name:
UpperCamelCase__ = defaults.tpu_name
if not args.tpu_zone:
UpperCamelCase__ = defaults.tpu_zone
if args.accelerate_version == "dev":
UpperCamelCase__ = 'git+https://github.com/huggingface/accelerate.git'
elif args.accelerate_version == "latest":
UpperCamelCase__ = 'accelerate -U'
elif isinstance(parse(args.accelerate_version ) , SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = F'accelerate=={args.accelerate_version}'
if not args.command_file and not args.command:
raise ValueError('You must specify either a command file or a command to run on the pod.' )
if args.command_file:
with open(args.command_file , 'r' ) as f:
UpperCamelCase__ = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
UpperCamelCase__ = ['cd /usr/share']
if args.install_accelerate:
new_cmd += [F'pip install {args.accelerate_version}']
new_cmd += args.command
UpperCamelCase__ = '; '.join(SCREAMING_SNAKE_CASE )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
UpperCamelCase__ = ['gcloud']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(F'Running {" ".join(SCREAMING_SNAKE_CASE )}' )
return
subprocess.run(SCREAMING_SNAKE_CASE )
print('Successfully setup pod.' )
def lowerCAmelCase_( ) -> int:
"""simple docstring"""
UpperCamelCase__ = tpu_command_parser()
UpperCamelCase__ = parser.parse_args()
tpu_command_launcher(SCREAMING_SNAKE_CASE )
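# Example invocation (a sketch; the flags mirror the argparse options defined above):
#
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "pip install -r requirements.txt" --command "python train.py" \
#       --install_accelerate --debug
#
# With --debug, the assembled `gcloud ... compute tpus tpu-vm ssh` command is printed
# instead of being executed.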
| 20 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(_a ) , """Tatoeba directory does not exist.""" )
class __lowerCamelCase ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ = tempfile.mkdtemp()
return TatoebaConverter(save_dir=snake_case_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
self.resolver.convert_models(['heb-eng'] )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ , UpperCamelCase__ = self.resolver.write_model_card('opus-mt-he-en' , dry_run=snake_case_ )
assert mmeta["long_pair"] == "heb-eng"
| 20 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : List[str]= logging.get_logger(__name__)
class __lowerCamelCase ( PretrainedConfig ):
    model_type = """timm_backbone"""
    def __init__( self , backbone=None , num_channels=3 , features_only=True , use_pretrained_backbone=True , out_indices=None , **kwargs , ) -> Dict:
        super().__init__(**kwargs )
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
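# A minimal usage sketch (assumes this config is exported as TimmBackboneConfig and
# paired with a TimmBackbone model in transformers; the backbone name is illustrative):
#
#   config = TimmBackboneConfig(backbone='resnet50', out_indices=(1, 2, 3, 4))
#   backbone = TimmBackbone(config)  # wraps the timm model as a feature-extraction backbone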
| 20 | 1 |
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
A__ : Optional[int]= get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(A__ , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
        token = '<pad>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
        self.assertEqual(len(vocab_keys ) , 1002 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1002 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
        tokenizer = XLMRobertaTokenizer(A__ , keep_accents=True )
        tokens = tokenizer.tokenize('This is a test' )
        self.assertListEqual(tokens , ['▁This', '▁is', '▁a', '▁t', 'est'] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
        self.tokenizers_list = [(self.rust_tokenizer_class, 'hf-internal-testing/tiny-xlm-roberta', {})]
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=True )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=False )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
@cached_property
    def big_tokenizer( self ):
        return XLMRobertaTokenizer.from_pretrained('xlm-roberta-base' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(A__ , f.name )
            tokenizer = XLMRobertaTokenizer(f.name , keep_accents=True )
            pickled_tokenizer = pickle.dumps(tokenizer )
        pickle.loads(pickled_tokenizer )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'I was born in 92000, and this is falsé.'
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
        symbols = 'Hello World!'
        original_tokenizer_encodings = [0, 3_5378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
        symbols = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
        original_tokenizer_encodings = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
17_9459,
12_4850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
1_0114,
711,
152,
20,
6,
5,
2_2376,
642,
1221,
1_5190,
3_4153,
450,
5608,
959,
1119,
5_7702,
136,
186,
47,
1098,
2_9367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
5_0901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
# fmt: off
UpperCamelCase__ = {'input_ids': [[0, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [0, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name='xlm-roberta-base' , revision='d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3' , )
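    # Background for the expectations above (not part of the test): XLM-R shifts all
    # SentencePiece ids by `fairseq_offset` (1) because fairseq reserves ids 0-3 for
    # <s>, <pad>, </s> and <unk>, e.g.:
    #
    #   tokenizer.fairseq_offset                  # 1
    #   tokenizer.convert_tokens_to_ids('<unk>')  # 3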
| 20 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
A__ : Any= logging.get_logger(__name__)
A__ : str= {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class __lowerCamelCase ( PretrainedConfig ):
    model_type = """layoutlmv3"""
    def __init__( self , vocab_size=5_0265 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-5 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_2d_position_embeddings=1024 , coordinate_size=128 , shape_size=128 , has_relative_attention_bias=True , rel_pos_bins=32 , max_rel_pos=128 , rel_2d_pos_bins=64 , max_rel_2d_pos=256 , has_spatial_attention_bias=True , text_embed=True , visual_embed=True , input_size=224 , num_channels=3 , patch_size=16 , classifier_dropout=None , **kwargs , ):
        super().__init__(
            vocab_size=vocab_size , hidden_size=hidden_size , num_hidden_layers=num_hidden_layers , num_attention_heads=num_attention_heads , intermediate_size=intermediate_size , hidden_act=hidden_act , hidden_dropout_prob=hidden_dropout_prob , attention_probs_dropout_prob=attention_probs_dropout_prob , max_position_embeddings=max_position_embeddings , type_vocab_size=type_vocab_size , initializer_range=initializer_range , layer_norm_eps=layer_norm_eps , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class __lowerCamelCase ( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("""1.12""" )
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
else:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels'}),
] )
@property
    def atol_for_validation( self ) -> float:
return 1E-5
@property
    def default_onnx_opset( self ) -> int:
return 12
    def generate_dummy_inputs( self , processor , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , num_channels = 3 , image_width = 40 , image_height = 40 , ) -> Mapping[str, Any]:
        setattr(processor.image_processor , 'apply_ocr' , False )
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair )
        seq_length = compute_effective_axis_dimension(
            seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[' '.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size , num_channels , image_height , image_width )
        inputs = dict(
            processor(
                dummy_image , text=dummy_text , boxes=dummy_bboxes , return_tensors=framework , ) )
        return inputs
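# A hypothetical end-to-end sketch: this OnnxConfig is what the transformers.onnx
# exporter picks up for LayoutLMv3, e.g. via the CLI (output path is a placeholder):
#
#   python -m transformers.onnx --model=microsoft/layoutlmv3-base onnx/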
| 20 | 1 |
"""simple docstring"""
import math
def decimal_to_octal( num ) -> str:
    """simple docstring"""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10 , counter ) ))
        counter += 1
        num = math.floor(num / 8 )  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return F'0o{int(octal )}'
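# For reference, Python's built-ins yield the same representation:
#   oct(65)      # '0o101'
#   F'0o{65:o}'  # '0o101'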
def main() -> None:
"""simple docstring"""
print('\n2 in octal is:' )
print(decimal_to_octal(2 ) ) # = 2
print('\n8 in octal is:' )
print(decimal_to_octal(8 ) ) # = 10
print('\n65 in octal is:' )
print(decimal_to_octal(65 ) ) # = 101
print('\n216 in octal is:' )
print(decimal_to_octal(2_16 ) ) # = 330
print('\n512 in octal is:' )
print(decimal_to_octal(5_12 ) ) # = 1000
print('\n' )
if __name__ == "__main__":
main()
| 20 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=2 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=3 , snake_case_=4 , snake_case_=None , ) -> Tuple:
UpperCamelCase__ = parent
UpperCamelCase__ = 13
UpperCamelCase__ = 7
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = 99
UpperCamelCase__ = 384
UpperCamelCase__ = 2
UpperCamelCase__ = 4
UpperCamelCase__ = 37
UpperCamelCase__ = 'gelu'
UpperCamelCase__ = 0.1
UpperCamelCase__ = 0.1
UpperCamelCase__ = 512
UpperCamelCase__ = 16
UpperCamelCase__ = 2
UpperCamelCase__ = 0.02
UpperCamelCase__ = 3
UpperCamelCase__ = 4
UpperCamelCase__ = 128
UpperCamelCase__ = 2
UpperCamelCase__ = 9
UpperCamelCase__ = 1
UpperCamelCase__ = None
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = ConvBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=True , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFConvBertModel(config=config )
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFConvBertForMaskedLM(config=config )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFConvBertForQuestionAnswering(config=config )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class __lowerCamelCase ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": TFConvBertModel,
"""fill-mask""": TFConvBertForMaskedLM,
"""question-answering""": TFConvBertForQuestionAnswering,
"""text-classification""": TFConvBertForSequenceClassification,
"""token-classification""": TFConvBertForTokenClassification,
"""zero-shot""": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFConvBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ConvBertConfig , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
    def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        if hasattr(config , 'use_cache' ):
            config.use_cache = True
        encoder_seq_length = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
        encoder_key_length = getattr(self.model_tester , 'key_length' , encoder_seq_length )
        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
            model = model_class(config )
            num_out = len(model(class_inputs_dict ) )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname , saved_model=True )
                saved_model_dir = os.path.join(tmpdirname , 'saved_model' , '1' )
                model = tf.keras.models.load_model(saved_model_dir )
                outputs = model(class_inputs_dict )
                if self.is_encoder_decoder:
                    output_hidden_states = outputs['encoder_hidden_states']
                    output_attentions = outputs['encoder_attentions']
                else:
                    output_hidden_states = outputs['hidden_states']
                    output_attentions = outputs['attentions']
                self.assertEqual(len(outputs ) , num_out )
                expected_num_layers = getattr(
                    self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
                self.assertEqual(len(output_hidden_states ) , expected_num_layers )
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
                self.assertEqual(len(output_attentions ) , self.model_tester.num_hidden_layers )
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
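                # Context note: ConvBERT's mixed attention replaces half of the standard
                # attention heads with a span-based convolutional branch (head_ratio=2 by
                # default in ConvBertConfig), hence the `num_attention_heads / 2` expectation.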
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(snake_case_ )
    def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
        encoder_seq_length = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
        decoder_key_length = getattr(self.model_tester , 'key_length' , decoder_seq_length )
        encoder_key_length = getattr(self.model_tester , 'key_length' , encoder_seq_length )
        def check_decoder_attentions_output(outputs ):
            out_len = len(outputs )
            self.assertEqual(out_len % 2 , 0 )
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
        def check_encoder_attentions_output(outputs ):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
        for model_class in self.all_model_classes:
            inputs_dict['output_attentions'] = True
            config.output_hidden_states = False
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            out_len = len(outputs )
            self.assertEqual(config.output_hidden_states , False )
            check_encoder_attentions_output(outputs )
            if self.is_encoder_decoder:
                model = model_class(config )
                outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
                self.assertEqual(config.output_hidden_states , False )
                check_decoder_attentions_output(outputs )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(config.output_hidden_states , False )
            check_encoder_attentions_output(outputs )
            # Check attention is always last and order is fine
            inputs_dict['output_attentions'] = True
            config.output_hidden_states = True
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(outputs ) )
            self.assertEqual(model.config.output_hidden_states , True )
            check_encoder_attentions_output(outputs )
@require_tf
class __lowerCamelCase ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
        model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape , expected_shape )
        expected_slice = tf.constant(
            [
                [
                    [-0.03_475_493, -0.4_686_034, -0.30_638_832],
                    [0.22_637_248, -0.26_988_646, -0.7_423_424],
                    [0.10_324_868, -0.45_013_508, -0.58_280_784],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-4 )
| 20 | 1 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester( unittest.TestCase ):
def __init__( self , snake_case_ , snake_case_=7 , snake_case_=3 , snake_case_=30 , snake_case_=400 , snake_case_=True , snake_case_=None , snake_case_=True , snake_case_=[0.5, 0.5, 0.5] , snake_case_=[0.5, 0.5, 0.5] , snake_case_=True , snake_case_=1 / 255 , snake_case_=True , ) -> List[str]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
UpperCamelCase__ = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = min_resolution
UpperCamelCase__ = max_resolution
UpperCamelCase__ = do_resize
UpperCamelCase__ = size
UpperCamelCase__ = do_normalize
UpperCamelCase__ = image_mean
UpperCamelCase__ = image_std
UpperCamelCase__ = do_rescale
UpperCamelCase__ = rescale_factor
UpperCamelCase__ = do_pad
    def prepare_image_processor_dict( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values( self , image_inputs , batched=False ):
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w )
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h )
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class __lowerCamelCase ( _a , unittest.TestCase ):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self )
@property
    def image_processor_dict( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , 'image_mean' ) )
        self.assertTrue(hasattr(image_processing , 'image_std' ) )
        self.assertTrue(hasattr(image_processing , 'do_normalize' ) )
        self.assertTrue(hasattr(image_processing , 'do_resize' ) )
        self.assertTrue(hasattr(image_processing , 'size' ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} )
        self.assertEqual(image_processor.do_pad , True )
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=False )
        self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
        self.assertEqual(image_processor.do_pad , False )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
pass
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
# prepare image and target
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
            target = json.loads(f.read() )
        target = {'image_id': 3_9769, 'annotations': target}
        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained('microsoft/conditional-detr-resnet-50' )
        encoding = image_processing(images=image , annotations=target , return_tensors='pt' )
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding['pixel_values'].shape , expected_shape )
        expected_slice = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , expected_slice , atol=1E-4 ) )
        # verify area
        expected_area = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , expected_area ) )
        # verify boxes
        expected_shape = torch.Size([6, 4] )
        self.assertEqual(encoding['labels'][0]['boxes'].shape , expected_shape )
        expected_slice = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , expected_slice , atol=1E-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([3_9769] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , expected_image_id ) )
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , expected_is_crowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , expected_class_labels ) )
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , expected_size ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
# prepare image, target and masks_path
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
            target = json.loads(f.read() )
        target = {'file_name': '000000039769.png', 'image_id': 3_9769, 'segments_info': target}
        masks_path = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
        # encode them
        image_processing = ConditionalDetrImageProcessor(format='coco_panoptic' )
        encoding = image_processing(images=image , annotations=target , masks_path=masks_path , return_tensors='pt' )
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding['pixel_values'].shape , expected_shape )
        expected_slice = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , expected_slice , atol=1E-4 ) )
        # verify area
        expected_area = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , expected_area ) )
        # verify boxes
        expected_shape = torch.Size([6, 4] )
        self.assertEqual(encoding['labels'][0]['boxes'].shape , expected_shape )
        expected_slice = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , expected_slice , atol=1E-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([3_9769] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , expected_image_id ) )
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , expected_is_crowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , expected_class_labels ) )
        # verify masks
        expected_masks_sum = 82_2873
        self.assertEqual(encoding['labels'][0]['masks'].sum().item() , expected_masks_sum )
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , expected_size ) )
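    # A minimal usage sketch for this image processor outside the test fixtures
    # (the image path is a placeholder):
    #
    #   processor = ConditionalDetrImageProcessor.from_pretrained('microsoft/conditional-detr-resnet-50')
    #   inputs = processor(images=Image.open('cat.png'), return_tensors='pt')
    #   inputs['pixel_values'].shape  # (1, 3, H, W) after resize, rescale, normalize and pad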
| 20 |
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def solution( t_limit = 1_00_00_00 , n_limit = 10 ) -> int:
    """simple docstring"""
    count = defaultdict(int )
    for outer_width in range(3 , (t_limit // 4) + 2 ):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound , outer_width - 1 , 2 ):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit )
if __name__ == "__main__":
print(F"""{solution() = }""")
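# Background: each count key t = outer_width**2 - hole_width**2 is the number of tiles
# in a hollow square lamina; the smallest is outer=3, hole=1 with t = 8. solution()
# counts how many t <= t_limit can be formed in between 1 and n_limit distinct ways
# (Project Euler problem 174).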
| 20 | 1 |